Qwen2.5-1.5B-Open-R1-Distill / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 500.0,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.5,
"grad_norm": 8.514540942565027,
"learning_rate": 5e-06,
"loss": 0.585,
"step": 5
},
{
"epoch": 5.0,
"grad_norm": 3.9267725944427414,
"learning_rate": 1e-05,
"loss": 0.4444,
"step": 10
},
{
"epoch": 7.5,
"grad_norm": 1.359630389285586,
"learning_rate": 1.5e-05,
"loss": 0.1918,
"step": 15
},
{
"epoch": 10.0,
"grad_norm": 0.8675984069699776,
"learning_rate": 2e-05,
"loss": 0.0255,
"step": 20
},
{
"epoch": 12.5,
"grad_norm": 1.3166063968433408,
"learning_rate": 2.5e-05,
"loss": 0.0094,
"step": 25
},
{
"epoch": 15.0,
"grad_norm": 1.4298321029000574,
"learning_rate": 3e-05,
"loss": 0.0066,
"step": 30
},
{
"epoch": 17.5,
"grad_norm": 0.39016346214571596,
"learning_rate": 3.5e-05,
"loss": 0.0054,
"step": 35
},
{
"epoch": 20.0,
"grad_norm": 0.2790201303316679,
"learning_rate": 4e-05,
"loss": 0.0062,
"step": 40
},
{
"epoch": 22.5,
"grad_norm": 0.08501619599523778,
"learning_rate": 4.5e-05,
"loss": 0.004,
"step": 45
},
{
"epoch": 25.0,
"grad_norm": 0.062067934755517584,
"learning_rate": 5e-05,
"loss": 0.0029,
"step": 50
},
{
"epoch": 27.5,
"grad_norm": 0.052666305930186126,
"learning_rate": 4.9996924362330555e-05,
"loss": 0.0028,
"step": 55
},
{
"epoch": 30.0,
"grad_norm": 0.03938136111188392,
"learning_rate": 4.998769829017084e-05,
"loss": 0.0028,
"step": 60
},
{
"epoch": 32.5,
"grad_norm": 0.03394672644933696,
"learning_rate": 4.997232430583686e-05,
"loss": 0.0028,
"step": 65
},
{
"epoch": 35.0,
"grad_norm": 0.037661243334742,
"learning_rate": 4.995080661242243e-05,
"loss": 0.0028,
"step": 70
},
{
"epoch": 37.5,
"grad_norm": 0.039184906370568255,
"learning_rate": 4.992315109265007e-05,
"loss": 0.0027,
"step": 75
},
{
"epoch": 40.0,
"grad_norm": 0.04435589843124744,
"learning_rate": 4.988936530726276e-05,
"loss": 0.0028,
"step": 80
},
{
"epoch": 42.5,
"grad_norm": 0.02657151726999846,
"learning_rate": 4.984945849295686e-05,
"loss": 0.0026,
"step": 85
},
{
"epoch": 45.0,
"grad_norm": 0.03472385835097059,
"learning_rate": 4.980344155985695e-05,
"loss": 0.0027,
"step": 90
},
{
"epoch": 47.5,
"grad_norm": 0.031070975559645563,
"learning_rate": 4.975132708853304e-05,
"loss": 0.0026,
"step": 95
},
{
"epoch": 50.0,
"grad_norm": 0.03730805984645713,
"learning_rate": 4.9693129326561254e-05,
"loss": 0.0027,
"step": 100
},
{
"epoch": 52.5,
"grad_norm": 0.030401417138752773,
"learning_rate": 4.96288641846286e-05,
"loss": 0.0027,
"step": 105
},
{
"epoch": 55.0,
"grad_norm": 0.030056720425659458,
"learning_rate": 4.955854923218321e-05,
"loss": 0.0027,
"step": 110
},
{
"epoch": 57.5,
"grad_norm": 0.02714242610959012,
"learning_rate": 4.948220369263099e-05,
"loss": 0.0025,
"step": 115
},
{
"epoch": 60.0,
"grad_norm": 0.021468304046160087,
"learning_rate": 4.939984843808013e-05,
"loss": 0.0026,
"step": 120
},
{
"epoch": 62.5,
"grad_norm": 0.027579476441235425,
"learning_rate": 4.931150598363494e-05,
"loss": 0.0027,
"step": 125
},
{
"epoch": 65.0,
"grad_norm": 0.041471384473431336,
"learning_rate": 4.921720048124035e-05,
"loss": 0.0028,
"step": 130
},
{
"epoch": 67.5,
"grad_norm": 0.030743428074535287,
"learning_rate": 4.9116957713079084e-05,
"loss": 0.0027,
"step": 135
},
{
"epoch": 70.0,
"grad_norm": 0.03200739761669791,
"learning_rate": 4.901080508452305e-05,
"loss": 0.0027,
"step": 140
},
{
"epoch": 72.5,
"grad_norm": 0.02266656589663142,
"learning_rate": 4.889877161664096e-05,
"loss": 0.0027,
"step": 145
},
{
"epoch": 75.0,
"grad_norm": 0.03858968870626217,
"learning_rate": 4.878088793826428e-05,
"loss": 0.0026,
"step": 150
},
{
"epoch": 77.5,
"grad_norm": 0.0436229252420695,
"learning_rate": 4.865718627761363e-05,
"loss": 0.0027,
"step": 155
},
{
"epoch": 80.0,
"grad_norm": 0.021505547895787954,
"learning_rate": 4.8527700453487873e-05,
"loss": 0.0026,
"step": 160
},
{
"epoch": 82.5,
"grad_norm": 0.020985544413640905,
"learning_rate": 4.839246586601841e-05,
"loss": 0.0027,
"step": 165
},
{
"epoch": 85.0,
"grad_norm": 0.023894609006279273,
"learning_rate": 4.825151948699116e-05,
"loss": 0.0028,
"step": 170
},
{
"epoch": 87.5,
"grad_norm": 0.02603189523521318,
"learning_rate": 4.8104899849738795e-05,
"loss": 0.0026,
"step": 175
},
{
"epoch": 90.0,
"grad_norm": 0.033072587670050196,
"learning_rate": 4.795264703860616e-05,
"loss": 0.0027,
"step": 180
},
{
"epoch": 92.5,
"grad_norm": 0.029455707311945873,
"learning_rate": 4.779480267799158e-05,
"loss": 0.0027,
"step": 185
},
{
"epoch": 95.0,
"grad_norm": 0.031066744462249135,
"learning_rate": 4.763140992096718e-05,
"loss": 0.0027,
"step": 190
},
{
"epoch": 97.5,
"grad_norm": 0.03179467575788789,
"learning_rate": 4.7462513437481266e-05,
"loss": 0.0027,
"step": 195
},
{
"epoch": 100.0,
"grad_norm": 0.04391757622955937,
"learning_rate": 4.7288159402146e-05,
"loss": 0.0027,
"step": 200
},
{
"epoch": 102.5,
"grad_norm": 0.029394885252518814,
"learning_rate": 4.7108395481613736e-05,
"loss": 0.0028,
"step": 205
},
{
"epoch": 105.0,
"grad_norm": 0.027320800240786685,
"learning_rate": 4.692327082154542e-05,
"loss": 0.0027,
"step": 210
},
{
"epoch": 107.5,
"grad_norm": 0.030873484414188097,
"learning_rate": 4.6732836033174634e-05,
"loss": 0.0027,
"step": 215
},
{
"epoch": 110.0,
"grad_norm": 0.048697733723929194,
"learning_rate": 4.653714317947105e-05,
"loss": 0.0027,
"step": 220
},
{
"epoch": 112.5,
"grad_norm": 0.025217762723189756,
"learning_rate": 4.6336245760906896e-05,
"loss": 0.0026,
"step": 225
},
{
"epoch": 115.0,
"grad_norm": 0.034325242650264584,
"learning_rate": 4.6130198700830455e-05,
"loss": 0.0027,
"step": 230
},
{
"epoch": 117.5,
"grad_norm": 0.0392844047749773,
"learning_rate": 4.591905833045059e-05,
"loss": 0.0027,
"step": 235
},
{
"epoch": 120.0,
"grad_norm": 0.02965200107252582,
"learning_rate": 4.570288237343632e-05,
"loss": 0.0026,
"step": 240
},
{
"epoch": 122.5,
"grad_norm": 0.020210067730727373,
"learning_rate": 4.5481729930135805e-05,
"loss": 0.0026,
"step": 245
},
{
"epoch": 125.0,
"grad_norm": 0.027623657423726022,
"learning_rate": 4.5255661461418854e-05,
"loss": 0.0026,
"step": 250
},
{
"epoch": 127.5,
"grad_norm": 0.023255879069235798,
"learning_rate": 4.502473877214754e-05,
"loss": 0.0027,
"step": 255
},
{
"epoch": 130.0,
"grad_norm": 0.014768241964023569,
"learning_rate": 4.478902499427933e-05,
"loss": 0.0027,
"step": 260
},
{
"epoch": 132.5,
"grad_norm": 0.03668790355624439,
"learning_rate": 4.454858456960754e-05,
"loss": 0.0027,
"step": 265
},
{
"epoch": 135.0,
"grad_norm": 0.028968111779672595,
"learning_rate": 4.430348323214347e-05,
"loss": 0.0026,
"step": 270
},
{
"epoch": 137.5,
"grad_norm": 0.02287695653834384,
"learning_rate": 4.4053787990145465e-05,
"loss": 0.0027,
"step": 275
},
{
"epoch": 140.0,
"grad_norm": 0.014260556347040123,
"learning_rate": 4.379956710779951e-05,
"loss": 0.0027,
"step": 280
},
{
"epoch": 142.5,
"grad_norm": 0.029771289364298407,
"learning_rate": 4.3540890086556435e-05,
"loss": 0.0026,
"step": 285
},
{
"epoch": 145.0,
"grad_norm": 0.03788961751665883,
"learning_rate": 4.327782764613099e-05,
"loss": 0.0027,
"step": 290
},
{
"epoch": 147.5,
"grad_norm": 0.03971119187487509,
"learning_rate": 4.301045170516773e-05,
"loss": 0.0026,
"step": 295
},
{
"epoch": 150.0,
"grad_norm": 0.03370808514177945,
"learning_rate": 4.2738835361579175e-05,
"loss": 0.0026,
"step": 300
},
{
"epoch": 152.5,
"grad_norm": 0.04042652697228835,
"learning_rate": 4.2463052872561584e-05,
"loss": 0.0027,
"step": 305
},
{
"epoch": 155.0,
"grad_norm": 0.03521656241677526,
"learning_rate": 4.2183179634293794e-05,
"loss": 0.0028,
"step": 310
},
{
"epoch": 157.5,
"grad_norm": 0.03092781528226213,
"learning_rate": 4.1899292161324627e-05,
"loss": 0.0026,
"step": 315
},
{
"epoch": 160.0,
"grad_norm": 0.020310997173564212,
"learning_rate": 4.1611468065654586e-05,
"loss": 0.0026,
"step": 320
},
{
"epoch": 162.5,
"grad_norm": 0.015790567180341695,
"learning_rate": 4.1319786035517534e-05,
"loss": 0.0027,
"step": 325
},
{
"epoch": 165.0,
"grad_norm": 0.0194147951616345,
"learning_rate": 4.1024325813868065e-05,
"loss": 0.0027,
"step": 330
},
{
"epoch": 167.5,
"grad_norm": 0.030730482667734805,
"learning_rate": 4.072516817658065e-05,
"loss": 0.0027,
"step": 335
},
{
"epoch": 170.0,
"grad_norm": 0.029837731953442564,
"learning_rate": 4.0422394910366236e-05,
"loss": 0.0026,
"step": 340
},
{
"epoch": 172.5,
"grad_norm": 0.02492166177013941,
"learning_rate": 4.0116088790412645e-05,
"loss": 0.0026,
"step": 345
},
{
"epoch": 175.0,
"grad_norm": 0.022790129612325603,
"learning_rate": 3.980633355775461e-05,
"loss": 0.0027,
"step": 350
},
{
"epoch": 177.5,
"grad_norm": 0.0367286949474086,
"learning_rate": 3.949321389637986e-05,
"loss": 0.0027,
"step": 355
},
{
"epoch": 180.0,
"grad_norm": 0.026449210914021956,
"learning_rate": 3.917681541007734e-05,
"loss": 0.0025,
"step": 360
},
{
"epoch": 182.5,
"grad_norm": 0.025906355217603764,
"learning_rate": 3.885722459903399e-05,
"loss": 0.0027,
"step": 365
},
{
"epoch": 185.0,
"grad_norm": 0.013510506972149507,
"learning_rate": 3.853452883618644e-05,
"loss": 0.0027,
"step": 370
},
{
"epoch": 187.5,
"grad_norm": 0.02126782273762181,
"learning_rate": 3.8208816343334156e-05,
"loss": 0.0027,
"step": 375
},
{
"epoch": 190.0,
"grad_norm": 0.02778025887186039,
"learning_rate": 3.788017616702048e-05,
"loss": 0.0027,
"step": 380
},
{
"epoch": 192.5,
"grad_norm": 0.02023158670094667,
"learning_rate": 3.754869815418815e-05,
"loss": 0.0026,
"step": 385
},
{
"epoch": 195.0,
"grad_norm": 0.031240526748051056,
"learning_rate": 3.721447292761609e-05,
"loss": 0.0027,
"step": 390
},
{
"epoch": 197.5,
"grad_norm": 0.02268049791605829,
"learning_rate": 3.687759186114403e-05,
"loss": 0.0026,
"step": 395
},
{
"epoch": 200.0,
"grad_norm": 0.02998474683024178,
"learning_rate": 3.6538147054691817e-05,
"loss": 0.0027,
"step": 400
},
{
"epoch": 202.5,
"grad_norm": 0.026430986104193692,
"learning_rate": 3.619623130908018e-05,
"loss": 0.0027,
"step": 405
},
{
"epoch": 205.0,
"grad_norm": 0.010605276850508178,
"learning_rate": 3.5851938100659964e-05,
"loss": 0.0027,
"step": 410
},
{
"epoch": 207.5,
"grad_norm": 0.02068209169464807,
"learning_rate": 3.550536155575662e-05,
"loss": 0.0027,
"step": 415
},
{
"epoch": 210.0,
"grad_norm": 0.03300339570363238,
"learning_rate": 3.515659642493697e-05,
"loss": 0.0026,
"step": 420
},
{
"epoch": 212.5,
"grad_norm": 0.026016657903437497,
"learning_rate": 3.480573805710538e-05,
"loss": 0.0027,
"step": 425
},
{
"epoch": 215.0,
"grad_norm": 0.024770792616580162,
"learning_rate": 3.4452882373436316e-05,
"loss": 0.0026,
"step": 430
},
{
"epoch": 217.5,
"grad_norm": 0.0265444448484458,
"learning_rate": 3.4098125841150466e-05,
"loss": 0.0026,
"step": 435
},
{
"epoch": 220.0,
"grad_norm": 0.01924921498743014,
"learning_rate": 3.37415654471415e-05,
"loss": 0.0026,
"step": 440
},
{
"epoch": 222.5,
"grad_norm": 0.027603952267821173,
"learning_rate": 3.3383298671460944e-05,
"loss": 0.0026,
"step": 445
},
{
"epoch": 225.0,
"grad_norm": 0.0254709928199966,
"learning_rate": 3.3023423460667985e-05,
"loss": 0.0026,
"step": 450
},
{
"epoch": 227.5,
"grad_norm": 0.031183288661866662,
"learning_rate": 3.2662038201051914e-05,
"loss": 0.0027,
"step": 455
},
{
"epoch": 230.0,
"grad_norm": 0.03425285898227886,
"learning_rate": 3.2299241691734304e-05,
"loss": 0.0026,
"step": 460
},
{
"epoch": 232.5,
"grad_norm": 0.028752151160391863,
"learning_rate": 3.1935133117658306e-05,
"loss": 0.0027,
"step": 465
},
{
"epoch": 235.0,
"grad_norm": 0.028976330470022123,
"learning_rate": 3.156981202247248e-05,
"loss": 0.0026,
"step": 470
},
{
"epoch": 237.5,
"grad_norm": 0.026920539664030775,
"learning_rate": 3.1203378281316515e-05,
"loss": 0.0026,
"step": 475
},
{
"epoch": 240.0,
"grad_norm": 0.020989002709489214,
"learning_rate": 3.0835932073516444e-05,
"loss": 0.0027,
"step": 480
},
{
"epoch": 242.5,
"grad_norm": 0.024523095658861207,
"learning_rate": 3.0467573855196558e-05,
"loss": 0.0027,
"step": 485
},
{
"epoch": 245.0,
"grad_norm": 0.015580401724056997,
"learning_rate": 3.0098404331815695e-05,
"loss": 0.0027,
"step": 490
},
{
"epoch": 247.5,
"grad_norm": 0.022996840296875175,
"learning_rate": 2.9728524430635417e-05,
"loss": 0.0027,
"step": 495
},
{
"epoch": 250.0,
"grad_norm": 0.021854877708908236,
"learning_rate": 2.9358035273127483e-05,
"loss": 0.0026,
"step": 500
},
{
"epoch": 252.5,
"grad_norm": 0.025003129400125675,
"learning_rate": 2.8987038147328238e-05,
"loss": 0.0026,
"step": 505
},
{
"epoch": 255.0,
"grad_norm": 0.02759119149124416,
"learning_rate": 2.86156344801475e-05,
"loss": 0.0026,
"step": 510
},
{
"epoch": 257.5,
"grad_norm": 0.024349103530368846,
"learning_rate": 2.824392580963944e-05,
"loss": 0.0027,
"step": 515
},
{
"epoch": 260.0,
"grad_norm": 0.02918200596645593,
"learning_rate": 2.787201375724307e-05,
"loss": 0.0027,
"step": 520
},
{
"epoch": 262.5,
"grad_norm": 0.017661985537795705,
"learning_rate": 2.7500000000000004e-05,
"loss": 0.0026,
"step": 525
},
{
"epoch": 265.0,
"grad_norm": 0.02880881596677379,
"learning_rate": 2.7127986242756936e-05,
"loss": 0.0026,
"step": 530
},
{
"epoch": 267.5,
"grad_norm": 0.04090166133876468,
"learning_rate": 2.6756074190360563e-05,
"loss": 0.0028,
"step": 535
},
{
"epoch": 270.0,
"grad_norm": 0.022838636369005745,
"learning_rate": 2.63843655198525e-05,
"loss": 0.0027,
"step": 540
},
{
"epoch": 272.5,
"grad_norm": 0.03417808915995595,
"learning_rate": 2.6012961852671767e-05,
"loss": 0.0026,
"step": 545
},
{
"epoch": 275.0,
"grad_norm": 0.02391514801170413,
"learning_rate": 2.5641964726872526e-05,
"loss": 0.0026,
"step": 550
},
{
"epoch": 277.5,
"grad_norm": 0.027050966559574396,
"learning_rate": 2.527147556936459e-05,
"loss": 0.0027,
"step": 555
},
{
"epoch": 280.0,
"grad_norm": 0.029392338368449532,
"learning_rate": 2.4901595668184314e-05,
"loss": 0.0027,
"step": 560
},
{
"epoch": 282.5,
"grad_norm": 0.03213320609449678,
"learning_rate": 2.453242614480345e-05,
"loss": 0.0027,
"step": 565
},
{
"epoch": 285.0,
"grad_norm": 0.017580210273689653,
"learning_rate": 2.416406792648355e-05,
"loss": 0.0027,
"step": 570
},
{
"epoch": 287.5,
"grad_norm": 0.025395175371267904,
"learning_rate": 2.3796621718683487e-05,
"loss": 0.0027,
"step": 575
},
{
"epoch": 290.0,
"grad_norm": 0.02830946439085919,
"learning_rate": 2.3430187977527533e-05,
"loss": 0.0027,
"step": 580
},
{
"epoch": 292.5,
"grad_norm": 0.04065072947108163,
"learning_rate": 2.3064866882341696e-05,
"loss": 0.0027,
"step": 585
},
{
"epoch": 295.0,
"grad_norm": 0.028524952343665373,
"learning_rate": 2.27007583082657e-05,
"loss": 0.0026,
"step": 590
},
{
"epoch": 297.5,
"grad_norm": 0.0284550999234347,
"learning_rate": 2.233796179894809e-05,
"loss": 0.0026,
"step": 595
},
{
"epoch": 300.0,
"grad_norm": 0.03018543474602915,
"learning_rate": 2.1976576539332024e-05,
"loss": 0.0026,
"step": 600
},
{
"epoch": 302.5,
"grad_norm": 0.017475284356752212,
"learning_rate": 2.1616701328539057e-05,
"loss": 0.0026,
"step": 605
},
{
"epoch": 305.0,
"grad_norm": 0.02381743267091071,
"learning_rate": 2.1258434552858502e-05,
"loss": 0.0026,
"step": 610
},
{
"epoch": 307.5,
"grad_norm": 0.030282727334382083,
"learning_rate": 2.090187415884955e-05,
"loss": 0.0027,
"step": 615
},
{
"epoch": 310.0,
"grad_norm": 0.02239088576419377,
"learning_rate": 2.054711762656369e-05,
"loss": 0.0026,
"step": 620
},
{
"epoch": 312.5,
"grad_norm": 0.020772310557568244,
"learning_rate": 2.0194261942894628e-05,
"loss": 0.0026,
"step": 625
},
{
"epoch": 315.0,
"grad_norm": 0.03490126977201953,
"learning_rate": 1.984340357506303e-05,
"loss": 0.0026,
"step": 630
},
{
"epoch": 317.5,
"grad_norm": 0.015968447976387346,
"learning_rate": 1.949463844424338e-05,
"loss": 0.0026,
"step": 635
},
{
"epoch": 320.0,
"grad_norm": 0.023986554939275363,
"learning_rate": 1.914806189934003e-05,
"loss": 0.0028,
"step": 640
},
{
"epoch": 322.5,
"grad_norm": 0.03420061182554338,
"learning_rate": 1.8803768690919832e-05,
"loss": 0.0027,
"step": 645
},
{
"epoch": 325.0,
"grad_norm": 0.01792951575408546,
"learning_rate": 1.8461852945308196e-05,
"loss": 0.0027,
"step": 650
},
{
"epoch": 327.5,
"grad_norm": 0.023802622532695907,
"learning_rate": 1.8122408138855974e-05,
"loss": 0.0026,
"step": 655
},
{
"epoch": 330.0,
"grad_norm": 0.023316524889500846,
"learning_rate": 1.778552707238391e-05,
"loss": 0.0026,
"step": 660
},
{
"epoch": 332.5,
"grad_norm": 0.013376585581553524,
"learning_rate": 1.7451301845811857e-05,
"loss": 0.0026,
"step": 665
},
{
"epoch": 335.0,
"grad_norm": 0.019080334175566694,
"learning_rate": 1.711982383297953e-05,
"loss": 0.0026,
"step": 670
},
{
"epoch": 337.5,
"grad_norm": 0.01654436855407368,
"learning_rate": 1.6791183656665846e-05,
"loss": 0.0026,
"step": 675
},
{
"epoch": 340.0,
"grad_norm": 0.022278657387787754,
"learning_rate": 1.6465471163813574e-05,
"loss": 0.0027,
"step": 680
},
{
"epoch": 342.5,
"grad_norm": 0.015375948509190046,
"learning_rate": 1.6142775400966023e-05,
"loss": 0.0027,
"step": 685
},
{
"epoch": 345.0,
"grad_norm": 0.01684675867751184,
"learning_rate": 1.582318458992267e-05,
"loss": 0.0027,
"step": 690
},
{
"epoch": 347.5,
"grad_norm": 0.032002543536054376,
"learning_rate": 1.5506786103620146e-05,
"loss": 0.0027,
"step": 695
},
{
"epoch": 350.0,
"grad_norm": 0.021043035856906086,
"learning_rate": 1.5193666442245402e-05,
"loss": 0.0027,
"step": 700
},
{
"epoch": 352.5,
"grad_norm": 0.01601931955071107,
"learning_rate": 1.4883911209587368e-05,
"loss": 0.0026,
"step": 705
},
{
"epoch": 355.0,
"grad_norm": 0.02601502051924795,
"learning_rate": 1.4577605089633773e-05,
"loss": 0.0026,
"step": 710
},
{
"epoch": 357.5,
"grad_norm": 0.02710347701415514,
"learning_rate": 1.427483182341936e-05,
"loss": 0.0026,
"step": 715
},
{
"epoch": 360.0,
"grad_norm": 0.03320644743977735,
"learning_rate": 1.3975674186131937e-05,
"loss": 0.0027,
"step": 720
},
{
"epoch": 362.5,
"grad_norm": 0.01591474795789328,
"learning_rate": 1.3680213964482475e-05,
"loss": 0.0027,
"step": 725
},
{
"epoch": 365.0,
"grad_norm": 0.026155373607272086,
"learning_rate": 1.3388531934345416e-05,
"loss": 0.0027,
"step": 730
},
{
"epoch": 367.5,
"grad_norm": 0.024271143128982743,
"learning_rate": 1.3100707838675377e-05,
"loss": 0.0026,
"step": 735
},
{
"epoch": 370.0,
"grad_norm": 0.016369678310535946,
"learning_rate": 1.2816820365706206e-05,
"loss": 0.0027,
"step": 740
},
{
"epoch": 372.5,
"grad_norm": 0.016955558044549856,
"learning_rate": 1.2536947127438415e-05,
"loss": 0.0026,
"step": 745
},
{
"epoch": 375.0,
"grad_norm": 0.02239783849742695,
"learning_rate": 1.2261164638420832e-05,
"loss": 0.0026,
"step": 750
},
{
"epoch": 377.5,
"grad_norm": 0.029924950679270815,
"learning_rate": 1.198954829483227e-05,
"loss": 0.0027,
"step": 755
},
{
"epoch": 380.0,
"grad_norm": 0.028621427234193805,
"learning_rate": 1.1722172353869008e-05,
"loss": 0.0025,
"step": 760
},
{
"epoch": 382.5,
"grad_norm": 0.02561136953891063,
"learning_rate": 1.1459109913443567e-05,
"loss": 0.0026,
"step": 765
},
{
"epoch": 385.0,
"grad_norm": 0.028256041486332584,
"learning_rate": 1.12004328922005e-05,
"loss": 0.0027,
"step": 770
},
{
"epoch": 387.5,
"grad_norm": 0.029729889846126924,
"learning_rate": 1.094621200985454e-05,
"loss": 0.0027,
"step": 775
},
{
"epoch": 390.0,
"grad_norm": 0.014383243995550028,
"learning_rate": 1.0696516767856546e-05,
"loss": 0.0026,
"step": 780
},
{
"epoch": 392.5,
"grad_norm": 0.015879736540587025,
"learning_rate": 1.0451415430392474e-05,
"loss": 0.0026,
"step": 785
},
{
"epoch": 395.0,
"grad_norm": 0.024629301532455877,
"learning_rate": 1.0210975005720677e-05,
"loss": 0.0026,
"step": 790
},
{
"epoch": 397.5,
"grad_norm": 0.029707059150893382,
"learning_rate": 9.975261227852472e-06,
"loss": 0.0027,
"step": 795
},
{
"epoch": 400.0,
"grad_norm": 0.0318537391592188,
"learning_rate": 9.744338538581147e-06,
"loss": 0.0027,
"step": 800
},
{
"epoch": 402.5,
"grad_norm": 0.017706015342844352,
"learning_rate": 9.518270069864195e-06,
"loss": 0.0027,
"step": 805
},
{
"epoch": 405.0,
"grad_norm": 0.01724610650030409,
"learning_rate": 9.297117626563687e-06,
"loss": 0.0026,
"step": 810
},
{
"epoch": 407.5,
"grad_norm": 0.018957851028085164,
"learning_rate": 9.080941669549423e-06,
"loss": 0.0026,
"step": 815
},
{
"epoch": 410.0,
"grad_norm": 0.025418915769378568,
"learning_rate": 8.86980129916955e-06,
"loss": 0.0026,
"step": 820
},
{
"epoch": 412.5,
"grad_norm": 0.01583223922193541,
"learning_rate": 8.663754239093109e-06,
"loss": 0.0027,
"step": 825
},
{
"epoch": 415.0,
"grad_norm": 0.027164074218547888,
"learning_rate": 8.462856820528952e-06,
"loss": 0.0026,
"step": 830
},
{
"epoch": 417.5,
"grad_norm": 0.028647280916262126,
"learning_rate": 8.26716396682537e-06,
"loss": 0.0027,
"step": 835
},
{
"epoch": 420.0,
"grad_norm": 0.019553123738546497,
"learning_rate": 8.076729178454588e-06,
"loss": 0.0026,
"step": 840
},
{
"epoch": 422.5,
"grad_norm": 0.025379025955769637,
"learning_rate": 7.89160451838626e-06,
"loss": 0.0027,
"step": 845
},
{
"epoch": 425.0,
"grad_norm": 0.01750943138582632,
"learning_rate": 7.711840597853998e-06,
"loss": 0.0026,
"step": 850
},
{
"epoch": 427.5,
"grad_norm": 0.02520822601658372,
"learning_rate": 7.537486562518735e-06,
"loss": 0.0026,
"step": 855
},
{
"epoch": 430.0,
"grad_norm": 0.01993484997782361,
"learning_rate": 7.368590079032822e-06,
"loss": 0.0026,
"step": 860
},
{
"epoch": 432.5,
"grad_norm": 0.019158523070010834,
"learning_rate": 7.205197322008425e-06,
"loss": 0.0027,
"step": 865
},
{
"epoch": 435.0,
"grad_norm": 0.015239850907125138,
"learning_rate": 7.047352961393844e-06,
"loss": 0.0027,
"step": 870
},
{
"epoch": 437.5,
"grad_norm": 0.020559758942860218,
"learning_rate": 6.8951001502612065e-06,
"loss": 0.0027,
"step": 875
},
{
"epoch": 440.0,
"grad_norm": 0.02381279446911383,
"learning_rate": 6.748480513008844e-06,
"loss": 0.0025,
"step": 880
},
{
"epoch": 442.5,
"grad_norm": 0.014589626300082677,
"learning_rate": 6.607534133981594e-06,
"loss": 0.0026,
"step": 885
},
{
"epoch": 445.0,
"grad_norm": 0.026727121558605703,
"learning_rate": 6.472299546512134e-06,
"loss": 0.0026,
"step": 890
},
{
"epoch": 447.5,
"grad_norm": 0.01580348324753083,
"learning_rate": 6.342813722386374e-06,
"loss": 0.0027,
"step": 895
},
{
"epoch": 450.0,
"grad_norm": 0.02334311857794958,
"learning_rate": 6.219112061735721e-06,
"loss": 0.0026,
"step": 900
},
{
"epoch": 452.5,
"grad_norm": 0.022995449553396018,
"learning_rate": 6.1012283833590465e-06,
"loss": 0.0026,
"step": 905
},
{
"epoch": 455.0,
"grad_norm": 0.03391053587500504,
"learning_rate": 5.989194915476954e-06,
"loss": 0.0027,
"step": 910
},
{
"epoch": 457.5,
"grad_norm": 0.0281194121062883,
"learning_rate": 5.883042286920918e-06,
"loss": 0.0025,
"step": 915
},
{
"epoch": 460.0,
"grad_norm": 0.018983581090637,
"learning_rate": 5.782799518759658e-06,
"loss": 0.0025,
"step": 920
},
{
"epoch": 462.5,
"grad_norm": 0.024476355758836905,
"learning_rate": 5.688494016365067e-06,
"loss": 0.0026,
"step": 925
},
{
"epoch": 465.0,
"grad_norm": 0.023061048781595993,
"learning_rate": 5.600151561919871e-06,
"loss": 0.0027,
"step": 930
},
{
"epoch": 467.5,
"grad_norm": 0.012496893287463113,
"learning_rate": 5.517796307369017e-06,
"loss": 0.0026,
"step": 935
},
{
"epoch": 470.0,
"grad_norm": 0.01991683594417727,
"learning_rate": 5.44145076781679e-06,
"loss": 0.0026,
"step": 940
},
{
"epoch": 472.5,
"grad_norm": 0.028037001206555136,
"learning_rate": 5.371135815371398e-06,
"loss": 0.0027,
"step": 945
},
{
"epoch": 475.0,
"grad_norm": 0.026283232329898983,
"learning_rate": 5.3068706734387484e-06,
"loss": 0.0026,
"step": 950
},
{
"epoch": 477.5,
"grad_norm": 0.026815406305102027,
"learning_rate": 5.248672911466959e-06,
"loss": 0.0026,
"step": 955
},
{
"epoch": 480.0,
"grad_norm": 0.01847886314594556,
"learning_rate": 5.196558440143059e-06,
"loss": 0.0025,
"step": 960
},
{
"epoch": 482.5,
"grad_norm": 0.019370493214584474,
"learning_rate": 5.150541507043143e-06,
"loss": 0.0027,
"step": 965
},
{
"epoch": 485.0,
"grad_norm": 0.021779585772489417,
"learning_rate": 5.110634692737244e-06,
"loss": 0.0026,
"step": 970
},
{
"epoch": 487.5,
"grad_norm": 0.02541037015533656,
"learning_rate": 5.0768489073499295e-06,
"loss": 0.0027,
"step": 975
},
{
"epoch": 490.0,
"grad_norm": 0.02945361459676501,
"learning_rate": 5.049193387577574e-06,
"loss": 0.0026,
"step": 980
},
{
"epoch": 492.5,
"grad_norm": 0.017922772556181753,
"learning_rate": 5.027675694163144e-06,
"loss": 0.0028,
"step": 985
},
{
"epoch": 495.0,
"grad_norm": 0.023023048365518207,
"learning_rate": 5.012301709829164e-06,
"loss": 0.0026,
"step": 990
},
{
"epoch": 497.5,
"grad_norm": 0.020646776127788924,
"learning_rate": 5.003075637669448e-06,
"loss": 0.0027,
"step": 995
},
{
"epoch": 500.0,
"grad_norm": 0.0148034159233786,
"learning_rate": 5e-06,
"loss": 0.0027,
"step": 1000
},
{
"epoch": 500.0,
"step": 1000,
"total_flos": 28457722773504.0,
"train_loss": 0.008928275344893336,
"train_runtime": 1701.2794,
"train_samples_per_second": 18.809,
"train_steps_per_second": 0.588
}
],
"logging_steps": 5,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 500,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 28457722773504.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
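
A minimal sketch of how the log_history above could be inspected, assuming the file is saved locally as trainer_state.json and only the Python standard library is available (the path and the printed fields are illustrative, not part of the original file):

import json

# Load the trainer state emitted during training
# (path is an assumption; adjust to wherever trainer_state.json lives).
with open("trainer_state.json") as f:
    state = json.load(f)

# Every per-step record carries "learning_rate"; the final entry is the
# run summary (train_loss, train_runtime, total_flos, ...), so filter it out.
steps = [e for e in state["log_history"] if "learning_rate" in e]

print(f"logged steps : {len(steps)}")
print(f"first loss   : {steps[0]['loss']} (step {steps[0]['step']})")
print(f"last loss    : {steps[-1]['loss']} (step {steps[-1]['step']})")
print(f"peak lr      : {max(e['learning_rate'] for e in steps)}")

Run against the data above, this would report 200 logged steps, a loss falling from 0.585 at step 5 to 0.0027 at step 1000, and a peak learning rate of 5e-05 at step 50, matching the warmup-then-decay schedule visible in the entries.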