llama-3-1-qlora-2 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 222,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06756756756756757,
"grad_norm": 7.765780925750732,
"learning_rate": 0.0002,
"loss": 2.477,
"step": 5
},
{
"epoch": 0.13513513513513514,
"grad_norm": 2.2755630016326904,
"learning_rate": 0.0002,
"loss": 1.4771,
"step": 10
},
{
"epoch": 0.20270270270270271,
"grad_norm": 1.7242066860198975,
"learning_rate": 0.0002,
"loss": 1.2418,
"step": 15
},
{
"epoch": 0.2702702702702703,
"grad_norm": 95.61922454833984,
"learning_rate": 0.0002,
"loss": 1.1066,
"step": 20
},
{
"epoch": 0.33783783783783783,
"grad_norm": 1.10884690284729,
"learning_rate": 0.0002,
"loss": 1.0927,
"step": 25
},
{
"epoch": 0.40540540540540543,
"grad_norm": 0.9811855554580688,
"learning_rate": 0.0002,
"loss": 1.0442,
"step": 30
},
{
"epoch": 0.47297297297297297,
"grad_norm": 0.874477744102478,
"learning_rate": 0.0002,
"loss": 1.0362,
"step": 35
},
{
"epoch": 0.5405405405405406,
"grad_norm": 0.9105525612831116,
"learning_rate": 0.0002,
"loss": 1.0281,
"step": 40
},
{
"epoch": 0.6081081081081081,
"grad_norm": 0.7913025617599487,
"learning_rate": 0.0002,
"loss": 0.9934,
"step": 45
},
{
"epoch": 0.6756756756756757,
"grad_norm": 0.8067978024482727,
"learning_rate": 0.0002,
"loss": 0.9768,
"step": 50
},
{
"epoch": 0.7432432432432432,
"grad_norm": 0.8261044025421143,
"learning_rate": 0.0002,
"loss": 0.9662,
"step": 55
},
{
"epoch": 0.8108108108108109,
"grad_norm": 0.8108837604522705,
"learning_rate": 0.0002,
"loss": 0.9668,
"step": 60
},
{
"epoch": 0.8783783783783784,
"grad_norm": 0.7348493933677673,
"learning_rate": 0.0002,
"loss": 0.9431,
"step": 65
},
{
"epoch": 0.9459459459459459,
"grad_norm": 0.7668595314025879,
"learning_rate": 0.0002,
"loss": 0.9235,
"step": 70
},
{
"epoch": 1.0135135135135136,
"grad_norm": 0.7864215970039368,
"learning_rate": 0.0002,
"loss": 0.8857,
"step": 75
},
{
"epoch": 1.0810810810810811,
"grad_norm": 0.754004180431366,
"learning_rate": 0.0002,
"loss": 0.7299,
"step": 80
},
{
"epoch": 1.1486486486486487,
"grad_norm": 0.6962944865226746,
"learning_rate": 0.0002,
"loss": 0.7184,
"step": 85
},
{
"epoch": 1.2162162162162162,
"grad_norm": 0.969588041305542,
"learning_rate": 0.0002,
"loss": 0.7025,
"step": 90
},
{
"epoch": 1.2837837837837838,
"grad_norm": 0.6978746652603149,
"learning_rate": 0.0002,
"loss": 0.7014,
"step": 95
},
{
"epoch": 1.3513513513513513,
"grad_norm": 0.7287507653236389,
"learning_rate": 0.0002,
"loss": 0.6986,
"step": 100
},
{
"epoch": 1.4189189189189189,
"grad_norm": 0.7103177309036255,
"learning_rate": 0.0002,
"loss": 0.7347,
"step": 105
},
{
"epoch": 1.4864864864864864,
"grad_norm": 0.6908261775970459,
"learning_rate": 0.0002,
"loss": 0.7083,
"step": 110
},
{
"epoch": 1.554054054054054,
"grad_norm": 0.7442795634269714,
"learning_rate": 0.0002,
"loss": 0.6938,
"step": 115
},
{
"epoch": 1.6216216216216215,
"grad_norm": 0.6897634267807007,
"learning_rate": 0.0002,
"loss": 0.73,
"step": 120
},
{
"epoch": 1.689189189189189,
"grad_norm": 0.6686336994171143,
"learning_rate": 0.0002,
"loss": 0.7234,
"step": 125
},
{
"epoch": 1.7567567567567568,
"grad_norm": 0.6796671748161316,
"learning_rate": 0.0002,
"loss": 0.7173,
"step": 130
},
{
"epoch": 1.8243243243243243,
"grad_norm": 0.6994472146034241,
"learning_rate": 0.0002,
"loss": 0.6866,
"step": 135
},
{
"epoch": 1.8918918918918919,
"grad_norm": 0.6973745226860046,
"learning_rate": 0.0002,
"loss": 0.7225,
"step": 140
},
{
"epoch": 1.9594594594594594,
"grad_norm": 0.6692176461219788,
"learning_rate": 0.0002,
"loss": 0.7032,
"step": 145
},
{
"epoch": 2.027027027027027,
"grad_norm": 0.5986601114273071,
"learning_rate": 0.0002,
"loss": 0.6413,
"step": 150
},
{
"epoch": 2.0945945945945947,
"grad_norm": 0.6870444416999817,
"learning_rate": 0.0002,
"loss": 0.5096,
"step": 155
},
{
"epoch": 2.1621621621621623,
"grad_norm": 0.6344185471534729,
"learning_rate": 0.0002,
"loss": 0.5231,
"step": 160
},
{
"epoch": 2.22972972972973,
"grad_norm": 0.6490407586097717,
"learning_rate": 0.0002,
"loss": 0.5095,
"step": 165
},
{
"epoch": 2.2972972972972974,
"grad_norm": 0.7220724821090698,
"learning_rate": 0.0002,
"loss": 0.5243,
"step": 170
},
{
"epoch": 2.364864864864865,
"grad_norm": 0.6581041812896729,
"learning_rate": 0.0002,
"loss": 0.5064,
"step": 175
},
{
"epoch": 2.4324324324324325,
"grad_norm": 0.6284764409065247,
"learning_rate": 0.0002,
"loss": 0.5093,
"step": 180
},
{
"epoch": 2.5,
"grad_norm": 0.7284560203552246,
"learning_rate": 0.0002,
"loss": 0.5312,
"step": 185
},
{
"epoch": 2.5675675675675675,
"grad_norm": 0.723694920539856,
"learning_rate": 0.0002,
"loss": 0.5565,
"step": 190
},
{
"epoch": 2.635135135135135,
"grad_norm": 0.6478952169418335,
"learning_rate": 0.0002,
"loss": 0.5273,
"step": 195
},
{
"epoch": 2.7027027027027026,
"grad_norm": 0.6292466521263123,
"learning_rate": 0.0002,
"loss": 0.5167,
"step": 200
},
{
"epoch": 2.77027027027027,
"grad_norm": 0.6427861452102661,
"learning_rate": 0.0002,
"loss": 0.5256,
"step": 205
},
{
"epoch": 2.8378378378378377,
"grad_norm": 0.6279394626617432,
"learning_rate": 0.0002,
"loss": 0.5138,
"step": 210
},
{
"epoch": 2.9054054054054053,
"grad_norm": 0.6324751973152161,
"learning_rate": 0.0002,
"loss": 0.527,
"step": 215
},
{
"epoch": 2.972972972972973,
"grad_norm": 0.6362230181694031,
"learning_rate": 0.0002,
"loss": 0.524,
"step": 220
},
{
"epoch": 3.0,
"step": 222,
"total_flos": 1.7527093219609805e+17,
"train_loss": 0.7947500686387758,
"train_runtime": 3770.1678,
"train_samples_per_second": 0.937,
"train_steps_per_second": 0.059
}
],
"logging_steps": 5,
"max_steps": 222,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.7527093219609805e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
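
For convenience, here is a minimal sketch of how the log_history above can be read back and summarized. It assumes only the Python standard library and that the file is saved locally as trainer_state.json; the field names (step, epoch, loss, train_loss, train_runtime) come directly from the JSON above, and the script is illustrative rather than part of the original training run.

import json

# Minimal sketch: load this trainer_state.json and summarize the loss curve
# recorded in log_history. File name and field names are taken from the JSON
# above; everything else here is an assumption for illustration only.
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry "step", "epoch", and "loss"; the final entry instead
# holds run-level aggregates such as "train_loss" and "train_runtime".
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>3}  epoch {entry['epoch']:.2f}  loss {entry['loss']:.4f}")
    elif "train_loss" in entry:
        print(f"final: train_loss={entry['train_loss']:.4f}, "
              f"runtime={entry['train_runtime']:.0f}s over {entry['step']} steps")

Run against the data above, this would show the training loss falling from about 2.48 at step 5 to roughly 0.52 by step 220 over 3 epochs at a constant learning rate of 2e-4, with an average train_loss of about 0.795.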