large_crafting_sft_fail / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9995964161756397,
"eval_steps": 50,
"global_step": 1548,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006457341189765114,
"grad_norm": 36.13093813298851,
"learning_rate": 6.451612903225807e-07,
"loss": 1.4776,
"step": 10
},
{
"epoch": 0.012914682379530229,
"grad_norm": 10.88271660121199,
"learning_rate": 1.2903225806451614e-06,
"loss": 1.0088,
"step": 20
},
{
"epoch": 0.019372023569295342,
"grad_norm": 8.349376848130536,
"learning_rate": 1.935483870967742e-06,
"loss": 0.7032,
"step": 30
},
{
"epoch": 0.025829364759060457,
"grad_norm": 7.565511440200043,
"learning_rate": 2.580645161290323e-06,
"loss": 0.6113,
"step": 40
},
{
"epoch": 0.03228670594882557,
"grad_norm": 6.9547450559973605,
"learning_rate": 3.225806451612903e-06,
"loss": 0.5429,
"step": 50
},
{
"epoch": 0.03228670594882557,
"eval_loss": 0.4979822635650635,
"eval_runtime": 111.9712,
"eval_samples_per_second": 36.134,
"eval_steps_per_second": 2.26,
"step": 50
},
{
"epoch": 0.038744047138590684,
"grad_norm": 5.37755461346713,
"learning_rate": 3.870967741935484e-06,
"loss": 0.5324,
"step": 60
},
{
"epoch": 0.0452013883283558,
"grad_norm": 5.944625000599463,
"learning_rate": 4.516129032258065e-06,
"loss": 0.5465,
"step": 70
},
{
"epoch": 0.051658729518120915,
"grad_norm": 5.188224377415384,
"learning_rate": 5.161290322580646e-06,
"loss": 0.5395,
"step": 80
},
{
"epoch": 0.058116070707886026,
"grad_norm": 5.29650815819046,
"learning_rate": 5.806451612903226e-06,
"loss": 0.5142,
"step": 90
},
{
"epoch": 0.06457341189765115,
"grad_norm": 5.545565219468533,
"learning_rate": 6.451612903225806e-06,
"loss": 0.5398,
"step": 100
},
{
"epoch": 0.06457341189765115,
"eval_loss": 0.4740481972694397,
"eval_runtime": 111.8977,
"eval_samples_per_second": 36.158,
"eval_steps_per_second": 2.261,
"step": 100
},
{
"epoch": 0.07103075308741626,
"grad_norm": 4.49931443330707,
"learning_rate": 7.096774193548388e-06,
"loss": 0.5428,
"step": 110
},
{
"epoch": 0.07748809427718137,
"grad_norm": 4.966057668727009,
"learning_rate": 7.741935483870968e-06,
"loss": 0.5282,
"step": 120
},
{
"epoch": 0.08394543546694648,
"grad_norm": 4.829422007611953,
"learning_rate": 8.387096774193549e-06,
"loss": 0.559,
"step": 130
},
{
"epoch": 0.0904027766567116,
"grad_norm": 5.817474181181473,
"learning_rate": 9.03225806451613e-06,
"loss": 0.5582,
"step": 140
},
{
"epoch": 0.09686011784647672,
"grad_norm": 8.02692271099152,
"learning_rate": 9.67741935483871e-06,
"loss": 0.5484,
"step": 150
},
{
"epoch": 0.09686011784647672,
"eval_loss": 0.4832856059074402,
"eval_runtime": 111.9347,
"eval_samples_per_second": 36.146,
"eval_steps_per_second": 2.26,
"step": 150
},
{
"epoch": 0.10331745903624183,
"grad_norm": 4.3413736033416725,
"learning_rate": 9.999682112882774e-06,
"loss": 0.5607,
"step": 160
},
{
"epoch": 0.10977480022600694,
"grad_norm": 4.586183180211225,
"learning_rate": 9.997139258465151e-06,
"loss": 0.5586,
"step": 170
},
{
"epoch": 0.11623214141577205,
"grad_norm": 4.268945193260488,
"learning_rate": 9.992054842933846e-06,
"loss": 0.5564,
"step": 180
},
{
"epoch": 0.12268948260553716,
"grad_norm": 3.817348542603216,
"learning_rate": 9.984431452238968e-06,
"loss": 0.5253,
"step": 190
},
{
"epoch": 0.1291468237953023,
"grad_norm": 3.7920034076935067,
"learning_rate": 9.974272963661561e-06,
"loss": 0.5265,
"step": 200
},
{
"epoch": 0.1291468237953023,
"eval_loss": 0.47804030776023865,
"eval_runtime": 111.9585,
"eval_samples_per_second": 36.138,
"eval_steps_per_second": 2.26,
"step": 200
},
{
"epoch": 0.1356041649850674,
"grad_norm": 3.459834130945297,
"learning_rate": 9.961584543841625e-06,
"loss": 0.5329,
"step": 210
},
{
"epoch": 0.1420615061748325,
"grad_norm": 3.5341352790273883,
"learning_rate": 9.946372646150322e-06,
"loss": 0.5534,
"step": 220
},
{
"epoch": 0.14851884736459764,
"grad_norm": 3.6130657979533707,
"learning_rate": 9.928645007407791e-06,
"loss": 0.5365,
"step": 230
},
{
"epoch": 0.15497618855436274,
"grad_norm": 4.682242996257105,
"learning_rate": 9.908410643948162e-06,
"loss": 0.5118,
"step": 240
},
{
"epoch": 0.16143352974412786,
"grad_norm": 3.959992480881294,
"learning_rate": 9.885679847033832e-06,
"loss": 0.5278,
"step": 250
},
{
"epoch": 0.16143352974412786,
"eval_loss": 0.47931385040283203,
"eval_runtime": 111.8916,
"eval_samples_per_second": 36.16,
"eval_steps_per_second": 2.261,
"step": 250
},
{
"epoch": 0.16789087093389296,
"grad_norm": 3.7322960692063765,
"learning_rate": 9.860464177621286e-06,
"loss": 0.5422,
"step": 260
},
{
"epoch": 0.1743482121236581,
"grad_norm": 3.0954290376577713,
"learning_rate": 9.832776460481162e-06,
"loss": 0.4994,
"step": 270
},
{
"epoch": 0.1808055533134232,
"grad_norm": 3.5660372393672395,
"learning_rate": 9.802630777675528e-06,
"loss": 0.5331,
"step": 280
},
{
"epoch": 0.1872628945031883,
"grad_norm": 3.5180745275377667,
"learning_rate": 9.770042461395705e-06,
"loss": 0.5277,
"step": 290
},
{
"epoch": 0.19372023569295344,
"grad_norm": 3.307637994501876,
"learning_rate": 9.735028086164265e-06,
"loss": 0.5259,
"step": 300
},
{
"epoch": 0.19372023569295344,
"eval_loss": 0.45192769169807434,
"eval_runtime": 111.9272,
"eval_samples_per_second": 36.148,
"eval_steps_per_second": 2.26,
"step": 300
},
{
"epoch": 0.20017757688271853,
"grad_norm": 3.618284262141084,
"learning_rate": 9.697605460405162e-06,
"loss": 0.5184,
"step": 310
},
{
"epoch": 0.20663491807248366,
"grad_norm": 3.961931490949851,
"learning_rate": 9.657793617386337e-06,
"loss": 0.5183,
"step": 320
},
{
"epoch": 0.21309225926224876,
"grad_norm": 3.3007761861884823,
"learning_rate": 9.615612805539305e-06,
"loss": 0.5109,
"step": 330
},
{
"epoch": 0.21954960045201388,
"grad_norm": 3.2893563007794673,
"learning_rate": 9.571084478160763e-06,
"loss": 0.5056,
"step": 340
},
{
"epoch": 0.226006941641779,
"grad_norm": 3.363703184254249,
"learning_rate": 9.524231282501362e-06,
"loss": 0.5293,
"step": 350
},
{
"epoch": 0.226006941641779,
"eval_loss": 0.4497073292732239,
"eval_runtime": 111.9003,
"eval_samples_per_second": 36.157,
"eval_steps_per_second": 2.261,
"step": 350
},
{
"epoch": 0.2324642828315441,
"grad_norm": 3.5950061797093453,
"learning_rate": 9.475077048247243e-06,
"loss": 0.4857,
"step": 360
},
{
"epoch": 0.23892162402130923,
"grad_norm": 4.195317979395103,
"learning_rate": 9.423646775400186e-06,
"loss": 0.5132,
"step": 370
},
{
"epoch": 0.24537896521107433,
"grad_norm": 3.3932901420797026,
"learning_rate": 9.369966621562525e-06,
"loss": 0.494,
"step": 380
},
{
"epoch": 0.2518363064008394,
"grad_norm": 3.201620850435473,
"learning_rate": 9.314063888633312e-06,
"loss": 0.5045,
"step": 390
},
{
"epoch": 0.2582936475906046,
"grad_norm": 3.0140805846591756,
"learning_rate": 9.255967008922475e-06,
"loss": 0.5098,
"step": 400
},
{
"epoch": 0.2582936475906046,
"eval_loss": 0.43027767539024353,
"eval_runtime": 111.936,
"eval_samples_per_second": 36.146,
"eval_steps_per_second": 2.26,
"step": 400
},
{
"epoch": 0.2647509887803697,
"grad_norm": 2.947263578735693,
"learning_rate": 9.19570553069006e-06,
"loss": 0.5018,
"step": 410
},
{
"epoch": 0.2712083299701348,
"grad_norm": 3.27976180101635,
"learning_rate": 9.133310103117891e-06,
"loss": 0.4794,
"step": 420
},
{
"epoch": 0.27766567115989993,
"grad_norm": 3.028690497485754,
"learning_rate": 9.068812460721296e-06,
"loss": 0.477,
"step": 430
},
{
"epoch": 0.284123012349665,
"grad_norm": 9.22472978885445,
"learning_rate": 9.002245407208831e-06,
"loss": 0.5121,
"step": 440
},
{
"epoch": 0.2905803535394301,
"grad_norm": 3.2389742570843487,
"learning_rate": 8.9336427987982e-06,
"loss": 0.482,
"step": 450
},
{
"epoch": 0.2905803535394301,
"eval_loss": 0.4249188303947449,
"eval_runtime": 111.8946,
"eval_samples_per_second": 36.159,
"eval_steps_per_second": 2.261,
"step": 450
},
{
"epoch": 0.2970376947291953,
"grad_norm": 2.732994123391758,
"learning_rate": 8.863039526996891e-06,
"loss": 0.4717,
"step": 460
},
{
"epoch": 0.3034950359189604,
"grad_norm": 3.0988687687698677,
"learning_rate": 8.790471500856229e-06,
"loss": 0.4905,
"step": 470
},
{
"epoch": 0.3099523771087255,
"grad_norm": 3.0947098221576197,
"learning_rate": 8.715975628707917e-06,
"loss": 0.5077,
"step": 480
},
{
"epoch": 0.31640971829849057,
"grad_norm": 2.94319414282237,
"learning_rate": 8.63958979939235e-06,
"loss": 0.485,
"step": 490
},
{
"epoch": 0.3228670594882557,
"grad_norm": 3.16584229914845,
"learning_rate": 8.56135286298822e-06,
"loss": 0.4683,
"step": 500
},
{
"epoch": 0.3228670594882557,
"eval_loss": 0.4224134683609009,
"eval_runtime": 111.9514,
"eval_samples_per_second": 36.141,
"eval_steps_per_second": 2.26,
"step": 500
},
{
"epoch": 0.3293244006780208,
"grad_norm": 2.800609243217591,
"learning_rate": 8.48130461105324e-06,
"loss": 0.4983,
"step": 510
},
{
"epoch": 0.3357817418677859,
"grad_norm": 3.071948165652889,
"learning_rate": 8.399485756386038e-06,
"loss": 0.4568,
"step": 520
},
{
"epoch": 0.3422390830575511,
"grad_norm": 3.0648029677410675,
"learning_rate": 8.315937912319493e-06,
"loss": 0.47,
"step": 530
},
{
"epoch": 0.3486964242473162,
"grad_norm": 2.851989726691387,
"learning_rate": 8.23070357155605e-06,
"loss": 0.4703,
"step": 540
},
{
"epoch": 0.35515376543708127,
"grad_norm": 3.050910633248402,
"learning_rate": 8.143826084555809e-06,
"loss": 0.4572,
"step": 550
},
{
"epoch": 0.35515376543708127,
"eval_loss": 0.4135887324810028,
"eval_runtime": 111.8521,
"eval_samples_per_second": 36.173,
"eval_steps_per_second": 2.262,
"step": 550
},
{
"epoch": 0.3616111066268464,
"grad_norm": 2.981485417190354,
"learning_rate": 8.055349637488336e-06,
"loss": 0.4639,
"step": 560
},
{
"epoch": 0.3680684478166115,
"grad_norm": 2.9699353695800745,
"learning_rate": 7.965319229759435e-06,
"loss": 0.4606,
"step": 570
},
{
"epoch": 0.3745257890063766,
"grad_norm": 2.7984725950234433,
"learning_rate": 7.873780651124302e-06,
"loss": 0.4764,
"step": 580
},
{
"epoch": 0.3809831301961417,
"grad_norm": 2.616017681487916,
"learning_rate": 7.78078045839871e-06,
"loss": 0.4473,
"step": 590
},
{
"epoch": 0.38744047138590687,
"grad_norm": 2.826757920966078,
"learning_rate": 7.686365951780071e-06,
"loss": 0.456,
"step": 600
},
{
"epoch": 0.38744047138590687,
"eval_loss": 0.4034173786640167,
"eval_runtime": 111.9212,
"eval_samples_per_second": 36.15,
"eval_steps_per_second": 2.261,
"step": 600
},
{
"epoch": 0.39389781257567197,
"grad_norm": 2.825465317099364,
"learning_rate": 7.590585150790388e-06,
"loss": 0.4416,
"step": 610
},
{
"epoch": 0.40035515376543707,
"grad_norm": 2.9443179257564385,
"learning_rate": 7.493486769853383e-06,
"loss": 0.4652,
"step": 620
},
{
"epoch": 0.4068124949552022,
"grad_norm": 2.7289719580286893,
"learning_rate": 7.39512019351818e-06,
"loss": 0.4558,
"step": 630
},
{
"epoch": 0.4132698361449673,
"grad_norm": 2.66796889335289,
"learning_rate": 7.295535451342166e-06,
"loss": 0.4351,
"step": 640
},
{
"epoch": 0.4197271773347324,
"grad_norm": 3.07324590421804,
"learning_rate": 7.194783192445795e-06,
"loss": 0.4606,
"step": 650
},
{
"epoch": 0.4197271773347324,
"eval_loss": 0.39828598499298096,
"eval_runtime": 111.8888,
"eval_samples_per_second": 36.161,
"eval_steps_per_second": 2.261,
"step": 650
},
{
"epoch": 0.4261845185244975,
"grad_norm": 2.728311440018257,
"learning_rate": 7.092914659752286e-06,
"loss": 0.4533,
"step": 660
},
{
"epoch": 0.43264185971426267,
"grad_norm": 3.053431956925498,
"learning_rate": 6.989981663925301e-06,
"loss": 0.4521,
"step": 670
},
{
"epoch": 0.43909920090402776,
"grad_norm": 2.692808749624645,
"learning_rate": 6.886036557017881e-06,
"loss": 0.4658,
"step": 680
},
{
"epoch": 0.44555654209379286,
"grad_norm": 2.8255493961476144,
"learning_rate": 6.781132205846019e-06,
"loss": 0.4476,
"step": 690
},
{
"epoch": 0.452013883283558,
"grad_norm": 2.9302657855607612,
"learning_rate": 6.675321965100431e-06,
"loss": 0.4285,
"step": 700
},
{
"epoch": 0.452013883283558,
"eval_loss": 0.38738012313842773,
"eval_runtime": 111.8894,
"eval_samples_per_second": 36.161,
"eval_steps_per_second": 2.261,
"step": 700
},
{
"epoch": 0.4584712244733231,
"grad_norm": 2.81795203028901,
"learning_rate": 6.568659650210184e-06,
"loss": 0.4381,
"step": 710
},
{
"epoch": 0.4649285656630882,
"grad_norm": 2.6773940869357764,
"learning_rate": 6.461199509972001e-06,
"loss": 0.4264,
"step": 720
},
{
"epoch": 0.47138590685285336,
"grad_norm": 2.5004598549233354,
"learning_rate": 6.35299619895914e-06,
"loss": 0.4143,
"step": 730
},
{
"epoch": 0.47784324804261846,
"grad_norm": 2.8184324008176582,
"learning_rate": 6.244104749723916e-06,
"loss": 0.4364,
"step": 740
},
{
"epoch": 0.48430058923238356,
"grad_norm": 2.7733015055520718,
"learning_rate": 6.134580544807951e-06,
"loss": 0.4499,
"step": 750
},
{
"epoch": 0.48430058923238356,
"eval_loss": 0.38058942556381226,
"eval_runtime": 111.8743,
"eval_samples_per_second": 36.166,
"eval_steps_per_second": 2.261,
"step": 750
},
{
"epoch": 0.49075793042214866,
"grad_norm": 2.678406490336573,
"learning_rate": 6.024479288574448e-06,
"loss": 0.4374,
"step": 760
},
{
"epoch": 0.4972152716119138,
"grad_norm": 2.927553773587219,
"learning_rate": 5.9138569788767645e-06,
"loss": 0.4201,
"step": 770
},
{
"epoch": 0.5036726128016789,
"grad_norm": 2.9320183431217166,
"learning_rate": 5.802769878577729e-06,
"loss": 0.4201,
"step": 780
},
{
"epoch": 0.510129953991444,
"grad_norm": 2.6304212034468875,
"learning_rate": 5.691274486934166e-06,
"loss": 0.4258,
"step": 790
},
{
"epoch": 0.5165872951812092,
"grad_norm": 2.9033619402388475,
"learning_rate": 5.579427510861194e-06,
"loss": 0.4198,
"step": 800
},
{
"epoch": 0.5165872951812092,
"eval_loss": 0.36852195858955383,
"eval_runtime": 111.8939,
"eval_samples_per_second": 36.159,
"eval_steps_per_second": 2.261,
"step": 800
},
{
"epoch": 0.5230446363709742,
"grad_norm": 2.7380975946416615,
"learning_rate": 5.4672858360909e-06,
"loss": 0.4268,
"step": 810
},
{
"epoch": 0.5295019775607394,
"grad_norm": 2.6318513335680525,
"learning_rate": 5.35490649824008e-06,
"loss": 0.4219,
"step": 820
},
{
"epoch": 0.5359593187505045,
"grad_norm": 2.729811496006138,
"learning_rate": 5.242346653801739e-06,
"loss": 0.4447,
"step": 830
},
{
"epoch": 0.5424166599402696,
"grad_norm": 2.8046958926916123,
"learning_rate": 5.129663551075109e-06,
"loss": 0.4225,
"step": 840
},
{
"epoch": 0.5488740011300347,
"grad_norm": 2.479851593229805,
"learning_rate": 5.016914501048988e-06,
"loss": 0.4208,
"step": 850
},
{
"epoch": 0.5488740011300347,
"eval_loss": 0.366115003824234,
"eval_runtime": 111.9956,
"eval_samples_per_second": 36.126,
"eval_steps_per_second": 2.259,
"step": 850
},
{
"epoch": 0.5553313423197999,
"grad_norm": 2.574977762111032,
"learning_rate": 4.904156848253184e-06,
"loss": 0.415,
"step": 860
},
{
"epoch": 0.5617886835095649,
"grad_norm": 2.918973319415968,
"learning_rate": 4.791447941592896e-06,
"loss": 0.4238,
"step": 870
},
{
"epoch": 0.56824602469933,
"grad_norm": 2.6402399209732863,
"learning_rate": 4.678845105180886e-06,
"loss": 0.4075,
"step": 880
},
{
"epoch": 0.5747033658890952,
"grad_norm": 2.513263276275295,
"learning_rate": 4.566405609182247e-06,
"loss": 0.4042,
"step": 890
},
{
"epoch": 0.5811607070788603,
"grad_norm": 2.407452866472799,
"learning_rate": 4.454186640686607e-06,
"loss": 0.4379,
"step": 900
},
{
"epoch": 0.5811607070788603,
"eval_loss": 0.3636697232723236,
"eval_runtime": 111.939,
"eval_samples_per_second": 36.145,
"eval_steps_per_second": 2.26,
"step": 900
},
{
"epoch": 0.5876180482686254,
"grad_norm": 2.813924190060926,
"learning_rate": 4.3422452746226e-06,
"loss": 0.4009,
"step": 910
},
{
"epoch": 0.5940753894583906,
"grad_norm": 2.7536071015362262,
"learning_rate": 4.230638444729368e-06,
"loss": 0.4368,
"step": 920
},
{
"epoch": 0.6005327306481556,
"grad_norm": 2.7536457081859362,
"learning_rate": 4.1194229145998795e-06,
"loss": 0.4047,
"step": 930
},
{
"epoch": 0.6069900718379208,
"grad_norm": 2.5417632926155065,
"learning_rate": 4.008655248810787e-06,
"loss": 0.3952,
"step": 940
},
{
"epoch": 0.6134474130276858,
"grad_norm": 2.539496645334203,
"learning_rate": 3.898391784153494e-06,
"loss": 0.4075,
"step": 950
},
{
"epoch": 0.6134474130276858,
"eval_loss": 0.3558257520198822,
"eval_runtime": 111.9665,
"eval_samples_per_second": 36.136,
"eval_steps_per_second": 2.26,
"step": 950
},
{
"epoch": 0.619904754217451,
"grad_norm": 2.6691260702161093,
"learning_rate": 3.788688600981085e-06,
"loss": 0.405,
"step": 960
},
{
"epoch": 0.6263620954072161,
"grad_norm": 2.551402138928399,
"learning_rate": 3.679601494685679e-06,
"loss": 0.417,
"step": 970
},
{
"epoch": 0.6328194365969811,
"grad_norm": 2.5606407585370152,
"learning_rate": 3.571185947320712e-06,
"loss": 0.4077,
"step": 980
},
{
"epoch": 0.6392767777867463,
"grad_norm": 2.6994790732851612,
"learning_rate": 3.4634970993825854e-06,
"loss": 0.3983,
"step": 990
},
{
"epoch": 0.6457341189765115,
"grad_norm": 2.731049592606316,
"learning_rate": 3.356589721766034e-06,
"loss": 0.4121,
"step": 1000
},
{
"epoch": 0.6457341189765115,
"eval_loss": 0.35134828090667725,
"eval_runtime": 111.9125,
"eval_samples_per_second": 36.153,
"eval_steps_per_second": 2.261,
"step": 1000
},
{
"epoch": 0.6521914601662765,
"grad_norm": 2.979542200795537,
"learning_rate": 3.250518187907469e-06,
"loss": 0.3869,
"step": 1010
},
{
"epoch": 0.6586488013560416,
"grad_norm": 2.675413679581036,
"learning_rate": 3.1453364461304795e-06,
"loss": 0.3753,
"step": 1020
},
{
"epoch": 0.6651061425458068,
"grad_norm": 2.6493675186157404,
"learning_rate": 3.0410979922075344e-06,
"loss": 0.3915,
"step": 1030
},
{
"epoch": 0.6715634837355718,
"grad_norm": 2.469295262531258,
"learning_rate": 2.9378558421518645e-06,
"loss": 0.4089,
"step": 1040
},
{
"epoch": 0.678020824925337,
"grad_norm": 2.5114917155985728,
"learning_rate": 2.835662505253344e-06,
"loss": 0.4112,
"step": 1050
},
{
"epoch": 0.678020824925337,
"eval_loss": 0.34539544582366943,
"eval_runtime": 111.9229,
"eval_samples_per_second": 36.15,
"eval_steps_per_second": 2.26,
"step": 1050
},
{
"epoch": 0.6844781661151021,
"grad_norm": 2.685555421636365,
"learning_rate": 2.7345699573721014e-06,
"loss": 0.3993,
"step": 1060
},
{
"epoch": 0.6909355073048672,
"grad_norm": 2.654393738157245,
"learning_rate": 2.634629614503428e-06,
"loss": 0.388,
"step": 1070
},
{
"epoch": 0.6973928484946323,
"grad_norm": 2.617694622896868,
"learning_rate": 2.5358923066274354e-06,
"loss": 0.3992,
"step": 1080
},
{
"epoch": 0.7038501896843975,
"grad_norm": 2.31267040049805,
"learning_rate": 2.4384082518567643e-06,
"loss": 0.3801,
"step": 1090
},
{
"epoch": 0.7103075308741625,
"grad_norm": 2.569343236018448,
"learning_rate": 2.3422270308954936e-06,
"loss": 0.4041,
"step": 1100
},
{
"epoch": 0.7103075308741625,
"eval_loss": 0.3457443118095398,
"eval_runtime": 111.8667,
"eval_samples_per_second": 36.168,
"eval_steps_per_second": 2.262,
"step": 1100
},
{
"epoch": 0.7167648720639277,
"grad_norm": 2.460336553937589,
"learning_rate": 2.2473975618222304e-06,
"loss": 0.3909,
"step": 1110
},
{
"epoch": 0.7232222132536928,
"grad_norm": 2.5304504216094523,
"learning_rate": 2.1539680752102217e-06,
"loss": 0.3952,
"step": 1120
},
{
"epoch": 0.7296795544434579,
"grad_norm": 2.4097797244963077,
"learning_rate": 2.0619860895971266e-06,
"loss": 0.3736,
"step": 1130
},
{
"epoch": 0.736136895633223,
"grad_norm": 2.77218237063129,
"learning_rate": 1.9714983873169376e-06,
"loss": 0.4042,
"step": 1140
},
{
"epoch": 0.7425942368229881,
"grad_norm": 2.3331103182547976,
"learning_rate": 1.8825509907063328e-06,
"loss": 0.3852,
"step": 1150
},
{
"epoch": 0.7425942368229881,
"eval_loss": 0.33838221430778503,
"eval_runtime": 111.9392,
"eval_samples_per_second": 36.145,
"eval_steps_per_second": 2.26,
"step": 1150
},
{
"epoch": 0.7490515780127532,
"grad_norm": 3.014561790563234,
"learning_rate": 1.7951891386975684e-06,
"loss": 0.3756,
"step": 1160
},
{
"epoch": 0.7555089192025184,
"grad_norm": 2.4960607171337297,
"learning_rate": 1.7094572638098122e-06,
"loss": 0.3561,
"step": 1170
},
{
"epoch": 0.7619662603922834,
"grad_norm": 2.3869622987622527,
"learning_rate": 1.6253989695506207e-06,
"loss": 0.3755,
"step": 1180
},
{
"epoch": 0.7684236015820486,
"grad_norm": 2.431698639338266,
"learning_rate": 1.5430570082390545e-06,
"loss": 0.3709,
"step": 1190
},
{
"epoch": 0.7748809427718137,
"grad_norm": 2.6565827092298284,
"learning_rate": 1.462473259261713e-06,
"loss": 0.3656,
"step": 1200
},
{
"epoch": 0.7748809427718137,
"eval_loss": 0.33403000235557556,
"eval_runtime": 111.9782,
"eval_samples_per_second": 36.132,
"eval_steps_per_second": 2.259,
"step": 1200
},
{
"epoch": 0.7813382839615788,
"grad_norm": 2.678574249364229,
"learning_rate": 1.3836887077727424e-06,
"loss": 0.3842,
"step": 1210
},
{
"epoch": 0.7877956251513439,
"grad_norm": 2.7547839619954213,
"learning_rate": 1.3067434238486527e-06,
"loss": 0.3773,
"step": 1220
},
{
"epoch": 0.7942529663411091,
"grad_norm": 2.7583655228352564,
"learning_rate": 1.2316765421085513e-06,
"loss": 0.3769,
"step": 1230
},
{
"epoch": 0.8007103075308741,
"grad_norm": 2.7630568820041668,
"learning_rate": 1.1585262418101468e-06,
"loss": 0.3586,
"step": 1240
},
{
"epoch": 0.8071676487206393,
"grad_norm": 2.6227879310348405,
"learning_rate": 1.0873297274316568e-06,
"loss": 0.384,
"step": 1250
},
{
"epoch": 0.8071676487206393,
"eval_loss": 0.3302658796310425,
"eval_runtime": 111.9384,
"eval_samples_per_second": 36.145,
"eval_steps_per_second": 2.26,
"step": 1250
},
{
"epoch": 0.8136249899104044,
"grad_norm": 2.510577646614575,
"learning_rate": 1.0181232097494904e-06,
"loss": 0.3816,
"step": 1260
},
{
"epoch": 0.8200823311001695,
"grad_norm": 2.5172171373880627,
"learning_rate": 9.509418874213316e-07,
"loss": 0.3651,
"step": 1270
},
{
"epoch": 0.8265396722899346,
"grad_norm": 2.4416458810554853,
"learning_rate": 8.858199290839859e-07,
"loss": 0.3573,
"step": 1280
},
{
"epoch": 0.8329970134796998,
"grad_norm": 2.8967263378458457,
"learning_rate": 8.22790455975106e-07,
"loss": 0.383,
"step": 1290
},
{
"epoch": 0.8394543546694648,
"grad_norm": 2.525922134184123,
"learning_rate": 7.61885525087619e-07,
"loss": 0.3605,
"step": 1300
},
{
"epoch": 0.8394543546694648,
"eval_loss": 0.327566921710968,
"eval_runtime": 111.9302,
"eval_samples_per_second": 36.148,
"eval_steps_per_second": 2.26,
"step": 1300
},
{
"epoch": 0.84591169585923,
"grad_norm": 2.4695538863228497,
"learning_rate": 7.031361128654402e-07,
"loss": 0.3508,
"step": 1310
},
{
"epoch": 0.852369037048995,
"grad_norm": 2.6517631021997685,
"learning_rate": 6.465720994487523e-07,
"loss": 0.3806,
"step": 1320
},
{
"epoch": 0.8588263782387602,
"grad_norm": 2.7621954854306843,
"learning_rate": 5.922222534768707e-07,
"loss": 0.3838,
"step": 1330
},
{
"epoch": 0.8652837194285253,
"grad_norm": 2.5607471184144077,
"learning_rate": 5.401142174564195e-07,
"loss": 0.3627,
"step": 1340
},
{
"epoch": 0.8717410606182904,
"grad_norm": 2.595008916816477,
"learning_rate": 4.902744937022658e-07,
"loss": 0.3593,
"step": 1350
},
{
"epoch": 0.8717410606182904,
"eval_loss": 0.3246610164642334,
"eval_runtime": 111.9365,
"eval_samples_per_second": 36.145,
"eval_steps_per_second": 2.26,
"step": 1350
},
{
"epoch": 0.8781984018080555,
"grad_norm": 2.414235664744641,
"learning_rate": 4.4272843085835515e-07,
"loss": 0.3804,
"step": 1360
},
{
"epoch": 0.8846557429978207,
"grad_norm": 2.419217625399164,
"learning_rate": 3.9750021100531e-07,
"loss": 0.3764,
"step": 1370
},
{
"epoch": 0.8911130841875857,
"grad_norm": 2.6264090328200225,
"learning_rate": 3.546128373613472e-07,
"loss": 0.3743,
"step": 1380
},
{
"epoch": 0.8975704253773509,
"grad_norm": 2.707604001939969,
"learning_rate": 3.1408812258276575e-07,
"loss": 0.3566,
"step": 1390
},
{
"epoch": 0.904027766567116,
"grad_norm": 2.622265420891989,
"learning_rate": 2.7594667766995933e-07,
"loss": 0.3624,
"step": 1400
},
{
"epoch": 0.904027766567116,
"eval_loss": 0.323292076587677,
"eval_runtime": 111.9791,
"eval_samples_per_second": 36.132,
"eval_steps_per_second": 2.259,
"step": 1400
},
{
"epoch": 0.9104851077568811,
"grad_norm": 2.6447995971566876,
"learning_rate": 2.402079014845943e-07,
"loss": 0.3792,
"step": 1410
},
{
"epoch": 0.9169424489466462,
"grad_norm": 2.3016748893365193,
"learning_rate": 2.068899708832911e-07,
"loss": 0.3812,
"step": 1420
},
{
"epoch": 0.9233997901364114,
"grad_norm": 2.6336324469401697,
"learning_rate": 1.7600983147280982e-07,
"loss": 0.3579,
"step": 1430
},
{
"epoch": 0.9298571313261764,
"grad_norm": 2.559116903117589,
"learning_rate": 1.475831889914642e-07,
"loss": 0.3504,
"step": 1440
},
{
"epoch": 0.9363144725159416,
"grad_norm": 2.440040379975165,
"learning_rate": 1.2162450132113202e-07,
"loss": 0.3734,
"step": 1450
},
{
"epoch": 0.9363144725159416,
"eval_loss": 0.3228907585144043,
"eval_runtime": 111.9148,
"eval_samples_per_second": 36.153,
"eval_steps_per_second": 2.261,
"step": 1450
},
{
"epoch": 0.9427718137057067,
"grad_norm": 2.444760573532335,
"learning_rate": 9.814697113392835e-08,
"loss": 0.3687,
"step": 1460
},
{
"epoch": 0.9492291548954718,
"grad_norm": 2.5518514323077888,
"learning_rate": 7.716253917728622e-08,
"loss": 0.3485,
"step": 1470
},
{
"epoch": 0.9556864960852369,
"grad_norm": 2.3430257341500043,
"learning_rate": 5.8681878200850805e-08,
"loss": 0.3713,
"step": 1480
},
{
"epoch": 0.962143837275002,
"grad_norm": 2.517063988800139,
"learning_rate": 4.27143875282876e-08,
"loss": 0.3499,
"step": 1490
},
{
"epoch": 0.9686011784647671,
"grad_norm": 2.4117594779372586,
"learning_rate": 2.9268188276757035e-08,
"loss": 0.3609,
"step": 1500
},
{
"epoch": 0.9686011784647671,
"eval_loss": 0.3223486840724945,
"eval_runtime": 111.972,
"eval_samples_per_second": 36.134,
"eval_steps_per_second": 2.259,
"step": 1500
},
{
"epoch": 0.9750585196545323,
"grad_norm": 2.2584672304426157,
"learning_rate": 1.8350119226483442e-08,
"loss": 0.3609,
"step": 1510
},
{
"epoch": 0.9815158608442973,
"grad_norm": 2.4425634500856472,
"learning_rate": 9.965733342532925e-09,
"loss": 0.4155,
"step": 1520
},
{
"epoch": 0.9879732020340625,
"grad_norm": 2.3744660767238868,
"learning_rate": 4.119294950558072e-09,
"loss": 0.3591,
"step": 1530
},
{
"epoch": 0.9944305432238276,
"grad_norm": 2.4281464000637336,
"learning_rate": 8.137775679456505e-10,
"loss": 0.3655,
"step": 1540
},
{
"epoch": 0.9995964161756397,
"step": 1548,
"total_flos": 836493551075328.0,
"train_loss": 0.4527231248466235,
"train_runtime": 23130.6046,
"train_samples_per_second": 8.569,
"train_steps_per_second": 0.067
}
],
"logging_steps": 10,
"max_steps": 1548,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 836493551075328.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
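
The log above follows the standard Hugging Face Trainer trainer_state.json layout: every entry in "log_history" carries a "step", plus either training fields ("loss", "grad_norm", "learning_rate") or evaluation fields ("eval_loss", "eval_runtime", "eval_samples_per_second"). Below is a minimal sketch, assuming the file is saved locally as trainer_state.json, for loading it and plotting the train and eval loss curves; the filename and output path are illustrative, not part of the original repository.

import json

import matplotlib.pyplot as plt

# Load the state dict dumped by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Split log_history into training logs (keyed by "loss") and
# evaluation logs (keyed by "eval_loss"); both carry "step".
train_steps, train_loss = [], []
eval_steps, eval_loss = [], []
for entry in state["log_history"]:
    if "loss" in entry:
        train_steps.append(entry["step"])
        train_loss.append(entry["loss"])
    if "eval_loss" in entry:
        eval_steps.append(entry["step"])
        eval_loss.append(entry["eval_loss"])

plt.plot(train_steps, train_loss, label="train loss")
plt.plot(eval_steps, eval_loss, label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.tight_layout()
plt.savefig("loss_curves.png")

With the values logged here, training loss falls from about 1.48 at step 10 to roughly 0.36 near step 1540, and eval loss decreases monotonically from 0.498 at step 50 to 0.322 at step 1500.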