{
"best_global_step": 1500,
"best_metric": 0.6884484673104748,
"best_model_checkpoint": "./results_2/checkpoint-1500",
"epoch": 0.7317073170731707,
"eval_steps": 100,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004878048780487805,
"grad_norm": 0.09305039048194885,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.0173,
"step": 10
},
{
"epoch": 0.00975609756097561,
"grad_norm": 0.09173406660556793,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0167,
"step": 20
},
{
"epoch": 0.014634146341463415,
"grad_norm": 0.09779594093561172,
"learning_rate": 3e-06,
"loss": 0.0165,
"step": 30
},
{
"epoch": 0.01951219512195122,
"grad_norm": 0.1044146940112114,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0144,
"step": 40
},
{
"epoch": 0.024390243902439025,
"grad_norm": 0.09117760509252548,
"learning_rate": 5e-06,
"loss": 0.0156,
"step": 50
},
{
"epoch": 0.02926829268292683,
"grad_norm": 0.08018726110458374,
"learning_rate": 6e-06,
"loss": 0.0149,
"step": 60
},
{
"epoch": 0.03414634146341464,
"grad_norm": 0.1090320348739624,
"learning_rate": 7.000000000000001e-06,
"loss": 0.0152,
"step": 70
},
{
"epoch": 0.03902439024390244,
"grad_norm": 0.09608811885118484,
"learning_rate": 8.000000000000001e-06,
"loss": 0.017,
"step": 80
},
{
"epoch": 0.04390243902439024,
"grad_norm": 0.08458913117647171,
"learning_rate": 9e-06,
"loss": 0.0167,
"step": 90
},
{
"epoch": 0.04878048780487805,
"grad_norm": 0.07629529386758804,
"learning_rate": 1e-05,
"loss": 0.0153,
"step": 100
},
{
"epoch": 0.04878048780487805,
"eval_f1": 0.6863881803425316,
"eval_loss": 0.018864748999476433,
"eval_precision": 0.6192098499790808,
"eval_recall": 0.7699167657550535,
"eval_runtime": 130.1056,
"eval_samples_per_second": 63.026,
"eval_steps_per_second": 0.992,
"step": 100
},
{
"epoch": 0.05365853658536585,
"grad_norm": 0.09111520648002625,
"learning_rate": 1.1000000000000001e-05,
"loss": 0.0164,
"step": 110
},
{
"epoch": 0.05853658536585366,
"grad_norm": 0.09741566330194473,
"learning_rate": 1.2e-05,
"loss": 0.0134,
"step": 120
},
{
"epoch": 0.06341463414634146,
"grad_norm": 0.07199473679065704,
"learning_rate": 1.3000000000000001e-05,
"loss": 0.0154,
"step": 130
},
{
"epoch": 0.06829268292682927,
"grad_norm": 0.08497001975774765,
"learning_rate": 1.4000000000000001e-05,
"loss": 0.0161,
"step": 140
},
{
"epoch": 0.07317073170731707,
"grad_norm": 0.08193599432706833,
"learning_rate": 1.5e-05,
"loss": 0.0147,
"step": 150
},
{
"epoch": 0.07804878048780488,
"grad_norm": 0.10380581766366959,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.018,
"step": 160
},
{
"epoch": 0.08292682926829269,
"grad_norm": 0.11271784454584122,
"learning_rate": 1.7000000000000003e-05,
"loss": 0.0158,
"step": 170
},
{
"epoch": 0.08780487804878048,
"grad_norm": 0.0855347067117691,
"learning_rate": 1.8e-05,
"loss": 0.0166,
"step": 180
},
{
"epoch": 0.09268292682926829,
"grad_norm": 0.09468505531549454,
"learning_rate": 1.9e-05,
"loss": 0.0138,
"step": 190
},
{
"epoch": 0.0975609756097561,
"grad_norm": 0.06498368084430695,
"learning_rate": 2e-05,
"loss": 0.0138,
"step": 200
},
{
"epoch": 0.0975609756097561,
"eval_f1": 0.6788566213617548,
"eval_loss": 0.019105251878499985,
"eval_precision": 0.6045863570391873,
"eval_recall": 0.7739298454221165,
"eval_runtime": 126.9518,
"eval_samples_per_second": 64.591,
"eval_steps_per_second": 1.016,
"step": 200
},
{
"epoch": 0.1024390243902439,
"grad_norm": 0.08939243853092194,
"learning_rate": 2.1e-05,
"loss": 0.0177,
"step": 210
},
{
"epoch": 0.1073170731707317,
"grad_norm": 0.0708846002817154,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.0155,
"step": 220
},
{
"epoch": 0.11219512195121951,
"grad_norm": 0.10868319869041443,
"learning_rate": 2.3000000000000003e-05,
"loss": 0.0172,
"step": 230
},
{
"epoch": 0.11707317073170732,
"grad_norm": 0.06580492854118347,
"learning_rate": 2.4e-05,
"loss": 0.0142,
"step": 240
},
{
"epoch": 0.12195121951219512,
"grad_norm": 0.08139664679765701,
"learning_rate": 2.5e-05,
"loss": 0.0181,
"step": 250
},
{
"epoch": 0.12682926829268293,
"grad_norm": 0.07353170961141586,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.0149,
"step": 260
},
{
"epoch": 0.13170731707317074,
"grad_norm": 0.09713993221521378,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.0133,
"step": 270
},
{
"epoch": 0.13658536585365855,
"grad_norm": 0.0805194154381752,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.0187,
"step": 280
},
{
"epoch": 0.14146341463414633,
"grad_norm": 0.11184240132570267,
"learning_rate": 2.9e-05,
"loss": 0.0169,
"step": 290
},
{
"epoch": 0.14634146341463414,
"grad_norm": 0.11055561900138855,
"learning_rate": 3e-05,
"loss": 0.0179,
"step": 300
},
{
"epoch": 0.14634146341463414,
"eval_f1": 0.6787136697277903,
"eval_loss": 0.019281357526779175,
"eval_precision": 0.6091580502215658,
"eval_recall": 0.7662009512485137,
"eval_runtime": 126.6013,
"eval_samples_per_second": 64.77,
"eval_steps_per_second": 1.019,
"step": 300
},
{
"epoch": 0.15121951219512195,
"grad_norm": 0.07896488904953003,
"learning_rate": 3.1e-05,
"loss": 0.0152,
"step": 310
},
{
"epoch": 0.15609756097560976,
"grad_norm": 0.10503584146499634,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.0181,
"step": 320
},
{
"epoch": 0.16097560975609757,
"grad_norm": 0.10049130022525787,
"learning_rate": 3.3e-05,
"loss": 0.0169,
"step": 330
},
{
"epoch": 0.16585365853658537,
"grad_norm": 0.10558082908391953,
"learning_rate": 3.4000000000000007e-05,
"loss": 0.0158,
"step": 340
},
{
"epoch": 0.17073170731707318,
"grad_norm": 0.11450464278459549,
"learning_rate": 3.5e-05,
"loss": 0.0168,
"step": 350
},
{
"epoch": 0.17560975609756097,
"grad_norm": 0.15014208853244781,
"learning_rate": 3.6e-05,
"loss": 0.0145,
"step": 360
},
{
"epoch": 0.18048780487804877,
"grad_norm": 0.06404370069503784,
"learning_rate": 3.7e-05,
"loss": 0.0143,
"step": 370
},
{
"epoch": 0.18536585365853658,
"grad_norm": 0.09269659221172333,
"learning_rate": 3.8e-05,
"loss": 0.018,
"step": 380
},
{
"epoch": 0.1902439024390244,
"grad_norm": 0.09888678789138794,
"learning_rate": 3.9000000000000006e-05,
"loss": 0.0142,
"step": 390
},
{
"epoch": 0.1951219512195122,
"grad_norm": 0.16841690242290497,
"learning_rate": 4e-05,
"loss": 0.0207,
"step": 400
},
{
"epoch": 0.1951219512195122,
"eval_f1": 0.670402918255679,
"eval_loss": 0.02026216685771942,
"eval_precision": 0.6053057069285586,
"eval_recall": 0.7511890606420928,
"eval_runtime": 125.682,
"eval_samples_per_second": 65.244,
"eval_steps_per_second": 1.026,
"step": 400
},
{
"epoch": 0.2,
"grad_norm": 0.10583117604255676,
"learning_rate": 4.1e-05,
"loss": 0.0161,
"step": 410
},
{
"epoch": 0.2048780487804878,
"grad_norm": 0.08883402496576309,
"learning_rate": 4.2e-05,
"loss": 0.0167,
"step": 420
},
{
"epoch": 0.2097560975609756,
"grad_norm": 0.08243865519762039,
"learning_rate": 4.3e-05,
"loss": 0.0161,
"step": 430
},
{
"epoch": 0.2146341463414634,
"grad_norm": 0.06643176078796387,
"learning_rate": 4.4000000000000006e-05,
"loss": 0.0163,
"step": 440
},
{
"epoch": 0.21951219512195122,
"grad_norm": 0.05310194939374924,
"learning_rate": 4.5e-05,
"loss": 0.0149,
"step": 450
},
{
"epoch": 0.22439024390243903,
"grad_norm": 0.08369413018226624,
"learning_rate": 4.600000000000001e-05,
"loss": 0.0149,
"step": 460
},
{
"epoch": 0.22926829268292684,
"grad_norm": 0.06853285431861877,
"learning_rate": 4.7e-05,
"loss": 0.0142,
"step": 470
},
{
"epoch": 0.23414634146341465,
"grad_norm": 0.09447991102933884,
"learning_rate": 4.8e-05,
"loss": 0.0188,
"step": 480
},
{
"epoch": 0.23902439024390243,
"grad_norm": 0.09374509006738663,
"learning_rate": 4.9e-05,
"loss": 0.0174,
"step": 490
},
{
"epoch": 0.24390243902439024,
"grad_norm": 0.08009267598390579,
"learning_rate": 5e-05,
"loss": 0.0178,
"step": 500
},
{
"epoch": 0.24390243902439024,
"eval_f1": 0.6727972910165327,
"eval_loss": 0.019859502092003822,
"eval_precision": 0.6080043201728069,
"eval_recall": 0.7530469678953626,
"eval_runtime": 125.7109,
"eval_samples_per_second": 65.229,
"eval_steps_per_second": 1.026,
"step": 500
},
{
"epoch": 0.24878048780487805,
"grad_norm": 0.10110995173454285,
"learning_rate": 4.9911504424778765e-05,
"loss": 0.0176,
"step": 510
},
{
"epoch": 0.25365853658536586,
"grad_norm": 0.13534288108348846,
"learning_rate": 4.982300884955752e-05,
"loss": 0.019,
"step": 520
},
{
"epoch": 0.25853658536585367,
"grad_norm": 0.08551128208637238,
"learning_rate": 4.9734513274336284e-05,
"loss": 0.0156,
"step": 530
},
{
"epoch": 0.2634146341463415,
"grad_norm": 0.10786890238523483,
"learning_rate": 4.964601769911505e-05,
"loss": 0.0141,
"step": 540
},
{
"epoch": 0.2682926829268293,
"grad_norm": 0.10076329857110977,
"learning_rate": 4.955752212389381e-05,
"loss": 0.0175,
"step": 550
},
{
"epoch": 0.2731707317073171,
"grad_norm": 0.10950438678264618,
"learning_rate": 4.946902654867257e-05,
"loss": 0.0204,
"step": 560
},
{
"epoch": 0.2780487804878049,
"grad_norm": 0.14525355398654938,
"learning_rate": 4.938053097345133e-05,
"loss": 0.0188,
"step": 570
},
{
"epoch": 0.28292682926829266,
"grad_norm": 0.10379478335380554,
"learning_rate": 4.929203539823009e-05,
"loss": 0.0172,
"step": 580
},
{
"epoch": 0.28780487804878047,
"grad_norm": 0.11898482590913773,
"learning_rate": 4.9203539823008854e-05,
"loss": 0.0182,
"step": 590
},
{
"epoch": 0.2926829268292683,
"grad_norm": 0.09698235988616943,
"learning_rate": 4.911504424778761e-05,
"loss": 0.0161,
"step": 600
},
{
"epoch": 0.2926829268292683,
"eval_f1": 0.679977521404251,
"eval_loss": 0.01935453712940216,
"eval_precision": 0.612384638285204,
"eval_recall": 0.7643430439952438,
"eval_runtime": 126.1147,
"eval_samples_per_second": 65.02,
"eval_steps_per_second": 1.023,
"step": 600
},
{
"epoch": 0.2975609756097561,
"grad_norm": 0.11621833592653275,
"learning_rate": 4.902654867256637e-05,
"loss": 0.0172,
"step": 610
},
{
"epoch": 0.3024390243902439,
"grad_norm": 0.09230925887823105,
"learning_rate": 4.893805309734513e-05,
"loss": 0.0184,
"step": 620
},
{
"epoch": 0.3073170731707317,
"grad_norm": 0.07670127600431442,
"learning_rate": 4.88495575221239e-05,
"loss": 0.0157,
"step": 630
},
{
"epoch": 0.3121951219512195,
"grad_norm": 0.09875659644603729,
"learning_rate": 4.876106194690266e-05,
"loss": 0.0153,
"step": 640
},
{
"epoch": 0.3170731707317073,
"grad_norm": 0.07239075750112534,
"learning_rate": 4.867256637168142e-05,
"loss": 0.0137,
"step": 650
},
{
"epoch": 0.32195121951219513,
"grad_norm": 0.0903860479593277,
"learning_rate": 4.858407079646018e-05,
"loss": 0.0157,
"step": 660
},
{
"epoch": 0.32682926829268294,
"grad_norm": 0.10013315826654434,
"learning_rate": 4.849557522123894e-05,
"loss": 0.0163,
"step": 670
},
{
"epoch": 0.33170731707317075,
"grad_norm": 0.10445868223905563,
"learning_rate": 4.84070796460177e-05,
"loss": 0.0162,
"step": 680
},
{
"epoch": 0.33658536585365856,
"grad_norm": 0.09091856330633163,
"learning_rate": 4.831858407079646e-05,
"loss": 0.0173,
"step": 690
},
{
"epoch": 0.34146341463414637,
"grad_norm": 0.07674919068813324,
"learning_rate": 4.823008849557522e-05,
"loss": 0.0186,
"step": 700
},
{
"epoch": 0.34146341463414637,
"eval_f1": 0.6647128916943094,
"eval_loss": 0.019945966079831123,
"eval_precision": 0.5865302642796248,
"eval_recall": 0.7669441141498217,
"eval_runtime": 125.0727,
"eval_samples_per_second": 65.562,
"eval_steps_per_second": 1.031,
"step": 700
},
{
"epoch": 0.3463414634146341,
"grad_norm": 0.10548862814903259,
"learning_rate": 4.814159292035398e-05,
"loss": 0.0206,
"step": 710
},
{
"epoch": 0.35121951219512193,
"grad_norm": 0.0873124822974205,
"learning_rate": 4.805309734513275e-05,
"loss": 0.0178,
"step": 720
},
{
"epoch": 0.35609756097560974,
"grad_norm": 0.13294565677642822,
"learning_rate": 4.7964601769911506e-05,
"loss": 0.0171,
"step": 730
},
{
"epoch": 0.36097560975609755,
"grad_norm": 0.09687939286231995,
"learning_rate": 4.787610619469027e-05,
"loss": 0.021,
"step": 740
},
{
"epoch": 0.36585365853658536,
"grad_norm": 0.0876525342464447,
"learning_rate": 4.778761061946903e-05,
"loss": 0.0167,
"step": 750
},
{
"epoch": 0.37073170731707317,
"grad_norm": 0.09069986641407013,
"learning_rate": 4.769911504424779e-05,
"loss": 0.0159,
"step": 760
},
{
"epoch": 0.375609756097561,
"grad_norm": 0.09903474152088165,
"learning_rate": 4.761061946902655e-05,
"loss": 0.0179,
"step": 770
},
{
"epoch": 0.3804878048780488,
"grad_norm": 0.09864596277475357,
"learning_rate": 4.752212389380531e-05,
"loss": 0.0186,
"step": 780
},
{
"epoch": 0.3853658536585366,
"grad_norm": 0.14535708725452423,
"learning_rate": 4.743362831858407e-05,
"loss": 0.0163,
"step": 790
},
{
"epoch": 0.3902439024390244,
"grad_norm": 0.09460794180631638,
"learning_rate": 4.734513274336283e-05,
"loss": 0.0188,
"step": 800
},
{
"epoch": 0.3902439024390244,
"eval_f1": 0.6686207562971075,
"eval_loss": 0.019426193088293076,
"eval_precision": 0.5900511654349062,
"eval_recall": 0.7713287752675386,
"eval_runtime": 125.9109,
"eval_samples_per_second": 65.125,
"eval_steps_per_second": 1.025,
"step": 800
},
{
"epoch": 0.3951219512195122,
"grad_norm": 0.10055914521217346,
"learning_rate": 4.7256637168141595e-05,
"loss": 0.0163,
"step": 810
},
{
"epoch": 0.4,
"grad_norm": 0.09273848682641983,
"learning_rate": 4.716814159292036e-05,
"loss": 0.0169,
"step": 820
},
{
"epoch": 0.40487804878048783,
"grad_norm": 0.11031738668680191,
"learning_rate": 4.707964601769912e-05,
"loss": 0.0175,
"step": 830
},
{
"epoch": 0.4097560975609756,
"grad_norm": 0.09007064253091812,
"learning_rate": 4.699115044247788e-05,
"loss": 0.0161,
"step": 840
},
{
"epoch": 0.4146341463414634,
"grad_norm": 0.10067761689424515,
"learning_rate": 4.690265486725664e-05,
"loss": 0.0177,
"step": 850
},
{
"epoch": 0.4195121951219512,
"grad_norm": 0.15128421783447266,
"learning_rate": 4.6814159292035396e-05,
"loss": 0.0183,
"step": 860
},
{
"epoch": 0.424390243902439,
"grad_norm": 0.10395248234272003,
"learning_rate": 4.672566371681416e-05,
"loss": 0.0151,
"step": 870
},
{
"epoch": 0.4292682926829268,
"grad_norm": 0.08134312182664871,
"learning_rate": 4.663716814159292e-05,
"loss": 0.0184,
"step": 880
},
{
"epoch": 0.43414634146341463,
"grad_norm": 0.09317316114902496,
"learning_rate": 4.6548672566371684e-05,
"loss": 0.0177,
"step": 890
},
{
"epoch": 0.43902439024390244,
"grad_norm": 0.09974437952041626,
"learning_rate": 4.646017699115045e-05,
"loss": 0.0164,
"step": 900
},
{
"epoch": 0.43902439024390244,
"eval_f1": 0.6717267552182163,
"eval_loss": 0.01958908513188362,
"eval_precision": 0.6,
"eval_recall": 0.7629310344827587,
"eval_runtime": 125.8067,
"eval_samples_per_second": 65.179,
"eval_steps_per_second": 1.025,
"step": 900
},
{
"epoch": 0.44390243902439025,
"grad_norm": 0.10190931707620621,
"learning_rate": 4.637168141592921e-05,
"loss": 0.0219,
"step": 910
},
{
"epoch": 0.44878048780487806,
"grad_norm": 0.08647562563419342,
"learning_rate": 4.6283185840707966e-05,
"loss": 0.0152,
"step": 920
},
{
"epoch": 0.45365853658536587,
"grad_norm": 0.07675183564424515,
"learning_rate": 4.619469026548673e-05,
"loss": 0.0164,
"step": 930
},
{
"epoch": 0.4585365853658537,
"grad_norm": 0.11960858851671219,
"learning_rate": 4.6106194690265485e-05,
"loss": 0.0172,
"step": 940
},
{
"epoch": 0.4634146341463415,
"grad_norm": 0.09075548499822617,
"learning_rate": 4.601769911504425e-05,
"loss": 0.0159,
"step": 950
},
{
"epoch": 0.4682926829268293,
"grad_norm": 0.05601793900132179,
"learning_rate": 4.592920353982301e-05,
"loss": 0.0171,
"step": 960
},
{
"epoch": 0.47317073170731705,
"grad_norm": 0.0845816358923912,
"learning_rate": 4.584070796460177e-05,
"loss": 0.0185,
"step": 970
},
{
"epoch": 0.47804878048780486,
"grad_norm": 0.08288878947496414,
"learning_rate": 4.5752212389380536e-05,
"loss": 0.017,
"step": 980
},
{
"epoch": 0.48292682926829267,
"grad_norm": 0.1054663434624672,
"learning_rate": 4.56637168141593e-05,
"loss": 0.0148,
"step": 990
},
{
"epoch": 0.4878048780487805,
"grad_norm": 0.08182746171951294,
"learning_rate": 4.5575221238938055e-05,
"loss": 0.0164,
"step": 1000
},
{
"epoch": 0.4878048780487805,
"eval_f1": 0.6684826669260953,
"eval_loss": 0.019462432712316513,
"eval_precision": 0.5930038547839594,
"eval_recall": 0.7659780023781213,
"eval_runtime": 125.7047,
"eval_samples_per_second": 65.232,
"eval_steps_per_second": 1.026,
"step": 1000
},
{
"epoch": 0.4926829268292683,
"grad_norm": 0.09717566519975662,
"learning_rate": 4.548672566371682e-05,
"loss": 0.0182,
"step": 1010
},
{
"epoch": 0.4975609756097561,
"grad_norm": 0.09170341491699219,
"learning_rate": 4.5398230088495574e-05,
"loss": 0.0175,
"step": 1020
},
{
"epoch": 0.5024390243902439,
"grad_norm": 0.08827481418848038,
"learning_rate": 4.5309734513274336e-05,
"loss": 0.0177,
"step": 1030
},
{
"epoch": 0.5073170731707317,
"grad_norm": 0.08792293816804886,
"learning_rate": 4.52212389380531e-05,
"loss": 0.0184,
"step": 1040
},
{
"epoch": 0.5121951219512195,
"grad_norm": 0.13856299221515656,
"learning_rate": 4.5132743362831855e-05,
"loss": 0.0182,
"step": 1050
},
{
"epoch": 0.5170731707317073,
"grad_norm": 0.0926881730556488,
"learning_rate": 4.5044247787610625e-05,
"loss": 0.0177,
"step": 1060
},
{
"epoch": 0.5219512195121951,
"grad_norm": 0.0846000462770462,
"learning_rate": 4.495575221238939e-05,
"loss": 0.0157,
"step": 1070
},
{
"epoch": 0.526829268292683,
"grad_norm": 0.10093377530574799,
"learning_rate": 4.4867256637168144e-05,
"loss": 0.0188,
"step": 1080
},
{
"epoch": 0.5317073170731708,
"grad_norm": 0.09538795799016953,
"learning_rate": 4.4778761061946906e-05,
"loss": 0.0184,
"step": 1090
},
{
"epoch": 0.5365853658536586,
"grad_norm": 0.14831095933914185,
"learning_rate": 4.469026548672566e-05,
"loss": 0.0159,
"step": 1100
},
{
"epoch": 0.5365853658536586,
"eval_f1": 0.6750937261537441,
"eval_loss": 0.020070159807801247,
"eval_precision": 0.6097692538207972,
"eval_recall": 0.7560939357907254,
"eval_runtime": 126.5385,
"eval_samples_per_second": 64.802,
"eval_steps_per_second": 1.019,
"step": 1100
},
{
"epoch": 0.5414634146341464,
"grad_norm": 0.09911943227052689,
"learning_rate": 4.4601769911504425e-05,
"loss": 0.0174,
"step": 1110
},
{
"epoch": 0.5463414634146342,
"grad_norm": 0.07620345056056976,
"learning_rate": 4.451327433628319e-05,
"loss": 0.0157,
"step": 1120
},
{
"epoch": 0.551219512195122,
"grad_norm": 0.09838444739580154,
"learning_rate": 4.4424778761061944e-05,
"loss": 0.0146,
"step": 1130
},
{
"epoch": 0.5560975609756098,
"grad_norm": 0.08282183855772018,
"learning_rate": 4.433628318584071e-05,
"loss": 0.0178,
"step": 1140
},
{
"epoch": 0.5609756097560976,
"grad_norm": 0.09383049607276917,
"learning_rate": 4.4247787610619477e-05,
"loss": 0.0164,
"step": 1150
},
{
"epoch": 0.5658536585365853,
"grad_norm": 0.1041053757071495,
"learning_rate": 4.415929203539823e-05,
"loss": 0.018,
"step": 1160
},
{
"epoch": 0.5707317073170731,
"grad_norm": 0.0997629165649414,
"learning_rate": 4.4070796460176995e-05,
"loss": 0.016,
"step": 1170
},
{
"epoch": 0.5756097560975609,
"grad_norm": 0.1428525149822235,
"learning_rate": 4.398230088495575e-05,
"loss": 0.018,
"step": 1180
},
{
"epoch": 0.5804878048780487,
"grad_norm": 0.1270400732755661,
"learning_rate": 4.3893805309734514e-05,
"loss": 0.0172,
"step": 1190
},
{
"epoch": 0.5853658536585366,
"grad_norm": 0.08549308031797409,
"learning_rate": 4.380530973451328e-05,
"loss": 0.0147,
"step": 1200
},
{
"epoch": 0.5853658536585366,
"eval_f1": 0.6807967485136542,
"eval_loss": 0.019661063328385353,
"eval_precision": 0.621146184492798,
"eval_recall": 0.7531212841854935,
"eval_runtime": 125.9122,
"eval_samples_per_second": 65.125,
"eval_steps_per_second": 1.025,
"step": 1200
},
{
"epoch": 0.5902439024390244,
"grad_norm": 0.05166243761777878,
"learning_rate": 4.371681415929203e-05,
"loss": 0.0167,
"step": 1210
},
{
"epoch": 0.5951219512195122,
"grad_norm": 0.0989900454878807,
"learning_rate": 4.3628318584070796e-05,
"loss": 0.0185,
"step": 1220
},
{
"epoch": 0.6,
"grad_norm": 0.09084061533212662,
"learning_rate": 4.353982300884956e-05,
"loss": 0.0175,
"step": 1230
},
{
"epoch": 0.6048780487804878,
"grad_norm": 0.08588280528783798,
"learning_rate": 4.345132743362832e-05,
"loss": 0.0154,
"step": 1240
},
{
"epoch": 0.6097560975609756,
"grad_norm": 0.07876714318990707,
"learning_rate": 4.3362831858407084e-05,
"loss": 0.0155,
"step": 1250
},
{
"epoch": 0.6146341463414634,
"grad_norm": 0.10766426473855972,
"learning_rate": 4.327433628318584e-05,
"loss": 0.0162,
"step": 1260
},
{
"epoch": 0.6195121951219512,
"grad_norm": 0.08020301163196564,
"learning_rate": 4.31858407079646e-05,
"loss": 0.0174,
"step": 1270
},
{
"epoch": 0.624390243902439,
"grad_norm": 0.11564213037490845,
"learning_rate": 4.3097345132743366e-05,
"loss": 0.0167,
"step": 1280
},
{
"epoch": 0.6292682926829268,
"grad_norm": 0.11487080901861191,
"learning_rate": 4.300884955752212e-05,
"loss": 0.0155,
"step": 1290
},
{
"epoch": 0.6341463414634146,
"grad_norm": 0.09950511902570724,
"learning_rate": 4.2920353982300885e-05,
"loss": 0.0159,
"step": 1300
},
{
"epoch": 0.6341463414634146,
"eval_f1": 0.687796188619635,
"eval_loss": 0.01919134519994259,
"eval_precision": 0.6278456157575014,
"eval_recall": 0.7604042806183116,
"eval_runtime": 125.3379,
"eval_samples_per_second": 65.423,
"eval_steps_per_second": 1.029,
"step": 1300
},
{
"epoch": 0.6390243902439025,
"grad_norm": 0.08389411866664886,
"learning_rate": 4.283185840707965e-05,
"loss": 0.0156,
"step": 1310
},
{
"epoch": 0.6439024390243903,
"grad_norm": 0.08979038149118423,
"learning_rate": 4.274336283185841e-05,
"loss": 0.0173,
"step": 1320
},
{
"epoch": 0.6487804878048781,
"grad_norm": 0.069185771048069,
"learning_rate": 4.265486725663717e-05,
"loss": 0.0178,
"step": 1330
},
{
"epoch": 0.6536585365853659,
"grad_norm": 0.07186874747276306,
"learning_rate": 4.256637168141593e-05,
"loss": 0.0153,
"step": 1340
},
{
"epoch": 0.6585365853658537,
"grad_norm": 0.11203644424676895,
"learning_rate": 4.247787610619469e-05,
"loss": 0.0167,
"step": 1350
},
{
"epoch": 0.6634146341463415,
"grad_norm": 0.11567346006631851,
"learning_rate": 4.2389380530973455e-05,
"loss": 0.017,
"step": 1360
},
{
"epoch": 0.6682926829268293,
"grad_norm": 0.11276240646839142,
"learning_rate": 4.230088495575221e-05,
"loss": 0.0158,
"step": 1370
},
{
"epoch": 0.6731707317073171,
"grad_norm": 0.07067442685365677,
"learning_rate": 4.2212389380530974e-05,
"loss": 0.0156,
"step": 1380
},
{
"epoch": 0.6780487804878049,
"grad_norm": 0.08775879442691803,
"learning_rate": 4.2123893805309737e-05,
"loss": 0.0155,
"step": 1390
},
{
"epoch": 0.6829268292682927,
"grad_norm": 0.07028288394212723,
"learning_rate": 4.20353982300885e-05,
"loss": 0.0142,
"step": 1400
},
{
"epoch": 0.6829268292682927,
"eval_f1": 0.6818793753365644,
"eval_loss": 0.019489064812660217,
"eval_precision": 0.6231545275590551,
"eval_recall": 0.7528240190249703,
"eval_runtime": 125.915,
"eval_samples_per_second": 65.123,
"eval_steps_per_second": 1.025,
"step": 1400
},
{
"epoch": 0.6878048780487804,
"grad_norm": 0.08612460643053055,
"learning_rate": 4.194690265486726e-05,
"loss": 0.0163,
"step": 1410
},
{
"epoch": 0.6926829268292682,
"grad_norm": 0.1031985655426979,
"learning_rate": 4.185840707964602e-05,
"loss": 0.0146,
"step": 1420
},
{
"epoch": 0.697560975609756,
"grad_norm": 0.10174574702978134,
"learning_rate": 4.176991150442478e-05,
"loss": 0.017,
"step": 1430
},
{
"epoch": 0.7024390243902439,
"grad_norm": 0.06620911508798599,
"learning_rate": 4.1681415929203544e-05,
"loss": 0.0171,
"step": 1440
},
{
"epoch": 0.7073170731707317,
"grad_norm": 0.11920668184757233,
"learning_rate": 4.15929203539823e-05,
"loss": 0.0191,
"step": 1450
},
{
"epoch": 0.7121951219512195,
"grad_norm": 0.10696449875831604,
"learning_rate": 4.150442477876106e-05,
"loss": 0.0164,
"step": 1460
},
{
"epoch": 0.7170731707317073,
"grad_norm": 0.09047581255435944,
"learning_rate": 4.1415929203539825e-05,
"loss": 0.0185,
"step": 1470
},
{
"epoch": 0.7219512195121951,
"grad_norm": 0.08915913105010986,
"learning_rate": 4.132743362831858e-05,
"loss": 0.0172,
"step": 1480
},
{
"epoch": 0.7268292682926829,
"grad_norm": 0.10107399523258209,
"learning_rate": 4.123893805309735e-05,
"loss": 0.0188,
"step": 1490
},
{
"epoch": 0.7317073170731707,
"grad_norm": 0.1132921576499939,
"learning_rate": 4.115044247787611e-05,
"loss": 0.0166,
"step": 1500
},
{
"epoch": 0.7317073170731707,
"eval_f1": 0.6884484673104748,
"eval_loss": 0.0191908348351717,
"eval_precision": 0.6290352333517801,
"eval_recall": 0.7602556480380499,
"eval_runtime": 126.3472,
"eval_samples_per_second": 64.901,
"eval_steps_per_second": 1.021,
"step": 1500
}
],
"logging_steps": 10,
"max_steps": 6150,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3186928336896000.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}