dada22231 committed (verified)
Commit: 4696a30
Parent(s): e7022f3

Training in progress, step 10, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0f7ec55d0d67152854ebe7902e368f300849fec538a8f021e2f7fd19c849a17f
+ oid sha256:32dd43c67f0edd6beb327bf268e0bba624d5e22b7e0389c6e1a868f1ad79b84b
  size 36981072
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dbe70b6dff690678e2ebe12b45bd002e54cbccf629c844c816f1ed09daf3f010
+ oid sha256:82bfe3fe3268416258cdaa02544d696a142992b49f3284be27b8007ab72f7236
  size 19859140
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b543366cfabdd66b7ed63582e5303f4bff99a84636648762ece313be1d5466ed
+ oid sha256:024409963b12e6d11d1a7cb7bfc91b755e58587bf312b0dce1b1b785fdb23734
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9c8e6b04902f17ae368c3e6cfd97a31ad4de2f025d673daea8c033ce0e260946
+ oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
  size 1064
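
All four files above are stored through git-LFS, so the diff only shows the pointer files changing, i.e. the sha256 oid and byte size of the underlying artifact. Below is a minimal sketch for checking that locally downloaded copies match the new oids, assuming the checkpoint files have been pulled into a last-checkpoint/ directory; the paths and the helper name are illustrative, not part of this repository.

import hashlib
from pathlib import Path

def lfs_sha256(path, chunk_size=1 << 20):
    """Recompute the sha256 oid recorded in a git-LFS pointer file."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oids copied from the pointer diffs in this commit.
expected = {
    "last-checkpoint/adapter_model.safetensors": "32dd43c67f0edd6beb327bf268e0bba624d5e22b7e0389c6e1a868f1ad79b84b",
    "last-checkpoint/optimizer.pt": "82bfe3fe3268416258cdaa02544d696a142992b49f3284be27b8007ab72f7236",
    "last-checkpoint/rng_state.pth": "024409963b12e6d11d1a7cb7bfc91b755e58587bf312b0dce1b1b785fdb23734",
    "last-checkpoint/scheduler.pt": "bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a",
}

for rel_path, oid in expected.items():
    if Path(rel_path).exists():
        print(rel_path, "OK" if lfs_sha256(rel_path) == oid else "MISMATCH")
    else:
        print(rel_path, "missing")

Note that a file which is still an un-smudged LFS pointer on disk will hash to a different value than its oid, so a MISMATCH can also mean the real artifact was never fetched.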
last-checkpoint/trainer_state.json CHANGED
@@ -2,9 +2,9 @@
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.004729374671571204,
+ "epoch": 0.005254860746190226,
  "eval_steps": 3,
- "global_step": 9,
+ "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -103,6 +103,13 @@
  "eval_samples_per_second": 39.746,
  "eval_steps_per_second": 19.873,
  "step": 9
+ },
+ {
+ "epoch": 0.005254860746190226,
+ "grad_norm": 0.4398048520088196,
+ "learning_rate": 0.00018,
+ "loss": 0.0441,
+ "step": 10
  }
  ],
  "logging_steps": 1,
@@ -117,12 +124,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 291868432662528.0,
+ "total_flos": 324298258513920.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null