lewtun HF Staff commited on
Commit
22d8b46
·
verified ·
1 Parent(s): 19211bb

Model save

Browse files
README.md ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: Qwen/Qwen2.5-1.5B
3
+ library_name: transformers
4
+ model_name: Qwen2.5-1.5B-Open-R1-Distill
5
+ tags:
6
+ - generated_from_trainer
7
+ - trl
8
+ - sft
9
+ license: license
10
+ ---
11
+
12
+ # Model Card for Qwen2.5-1.5B-Open-R1-Distill
13
+
14
+ This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B](https://huggingface.co/Qwen/Qwen2.5-1.5B).
15
+ It has been trained using [TRL](https://github.com/huggingface/trl).
16
+
17
+ ## Quick start
18
+
19
+ ```python
20
+ from transformers import pipeline
21
+
22
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
23
+ generator = pipeline("text-generation", model="lewtun/Qwen2.5-1.5B-Open-R1-Distill", device="cuda")
24
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
25
+ print(output["generated_text"])
26
+ ```
27
+
28
+ ## Training procedure
29
+
30
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/huggingface/huggingface/runs/bmkflnwa)
31
+
32
+
33
+ This model was trained with SFT.
34
+
35
+ ### Framework versions
36
+
37
+ - TRL: 0.17.0.dev0
38
+ - Transformers: 4.51.2
39
+ - Pytorch: 2.6.0
40
+ - Datasets: 3.5.0
41
+ - Tokenizers: 0.21.1
42
+
43
+ ## Citations
44
+
45
+
46
+
47
+ Cite TRL as:
48
+
49
+ ```bibtex
50
+ @misc{vonwerra2022trl,
51
+ title = {{TRL: Transformer Reinforcement Learning}},
52
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
53
+ year = 2020,
54
+ journal = {GitHub repository},
55
+ publisher = {GitHub},
56
+ howpublished = {\url{https://github.com/huggingface/trl}}
57
+ }
58
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "total_flos": 1.0371853292601868e+19,
3
+ "train_loss": 0.5346580615310357,
4
+ "train_runtime": 5110.5186,
5
+ "train_samples": 93733,
6
+ "train_samples_per_second": 18.341,
7
+ "train_steps_per_second": 0.143
8
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151643,
3
+ "eos_token_id": 151643,
4
+ "max_new_tokens": 2048,
5
+ "transformers_version": "4.51.2"
6
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "total_flos": 1.0371853292601868e+19,
3
+ "train_loss": 0.5346580615310357,
4
+ "train_runtime": 5110.5186,
5
+ "train_samples": 93733,
6
+ "train_samples_per_second": 18.341,
7
+ "train_steps_per_second": 0.143
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,1212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 1.0,
6
+ "eval_steps": 500,
7
+ "global_step": 733,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.0068212824010914054,
14
+ "grad_norm": 1.5664408206939697,
15
+ "learning_rate": 5.405405405405406e-06,
16
+ "loss": 0.8612,
17
+ "num_tokens": 3753458.0,
18
+ "step": 5
19
+ },
20
+ {
21
+ "epoch": 0.013642564802182811,
22
+ "grad_norm": 1.5331162214279175,
23
+ "learning_rate": 1.2162162162162164e-05,
24
+ "loss": 0.8129,
25
+ "num_tokens": 7657459.0,
26
+ "step": 10
27
+ },
28
+ {
29
+ "epoch": 0.020463847203274217,
30
+ "grad_norm": 0.7885063886642456,
31
+ "learning_rate": 1.891891891891892e-05,
32
+ "loss": 0.7307,
33
+ "num_tokens": 11351827.0,
34
+ "step": 15
35
+ },
36
+ {
37
+ "epoch": 0.027285129604365622,
38
+ "grad_norm": 0.5782262682914734,
39
+ "learning_rate": 2.5675675675675675e-05,
40
+ "loss": 0.686,
41
+ "num_tokens": 15095356.0,
42
+ "step": 20
43
+ },
44
+ {
45
+ "epoch": 0.034106412005457026,
46
+ "grad_norm": 0.49164527654647827,
47
+ "learning_rate": 3.2432432432432436e-05,
48
+ "loss": 0.6616,
49
+ "num_tokens": 18878489.0,
50
+ "step": 25
51
+ },
52
+ {
53
+ "epoch": 0.040927694406548434,
54
+ "grad_norm": 0.47542551159858704,
55
+ "learning_rate": 3.918918918918919e-05,
56
+ "loss": 0.6376,
57
+ "num_tokens": 22607726.0,
58
+ "step": 30
59
+ },
60
+ {
61
+ "epoch": 0.047748976807639835,
62
+ "grad_norm": 0.3627755045890808,
63
+ "learning_rate": 4.594594594594595e-05,
64
+ "loss": 0.6316,
65
+ "num_tokens": 26596975.0,
66
+ "step": 35
67
+ },
68
+ {
69
+ "epoch": 0.054570259208731244,
70
+ "grad_norm": 0.4120884835720062,
71
+ "learning_rate": 4.999908316574644e-05,
72
+ "loss": 0.5997,
73
+ "num_tokens": 30372607.0,
74
+ "step": 40
75
+ },
76
+ {
77
+ "epoch": 0.061391541609822645,
78
+ "grad_norm": 0.3788425028324127,
79
+ "learning_rate": 4.998876963847189e-05,
80
+ "loss": 0.6009,
81
+ "num_tokens": 34180321.0,
82
+ "step": 45
83
+ },
84
+ {
85
+ "epoch": 0.06821282401091405,
86
+ "grad_norm": 0.3772900700569153,
87
+ "learning_rate": 4.996700181165029e-05,
88
+ "loss": 0.6005,
89
+ "num_tokens": 37904755.0,
90
+ "step": 50
91
+ },
92
+ {
93
+ "epoch": 0.07503410641200546,
94
+ "grad_norm": 0.42741668224334717,
95
+ "learning_rate": 4.993379077238036e-05,
96
+ "loss": 0.601,
97
+ "num_tokens": 41764548.0,
98
+ "step": 55
99
+ },
100
+ {
101
+ "epoch": 0.08185538881309687,
102
+ "grad_norm": 0.4014478027820587,
103
+ "learning_rate": 4.9889153436180295e-05,
104
+ "loss": 0.5889,
105
+ "num_tokens": 45475394.0,
106
+ "step": 60
107
+ },
108
+ {
109
+ "epoch": 0.08867667121418826,
110
+ "grad_norm": 0.41353562474250793,
111
+ "learning_rate": 4.983311253837213e-05,
112
+ "loss": 0.5926,
113
+ "num_tokens": 49295807.0,
114
+ "step": 65
115
+ },
116
+ {
117
+ "epoch": 0.09549795361527967,
118
+ "grad_norm": 0.34759485721588135,
119
+ "learning_rate": 4.9765696622501846e-05,
120
+ "loss": 0.582,
121
+ "num_tokens": 52931516.0,
122
+ "step": 70
123
+ },
124
+ {
125
+ "epoch": 0.10231923601637108,
126
+ "grad_norm": 0.383974552154541,
127
+ "learning_rate": 4.968694002580118e-05,
128
+ "loss": 0.5839,
129
+ "num_tokens": 56824861.0,
130
+ "step": 75
131
+ },
132
+ {
133
+ "epoch": 0.10914051841746249,
134
+ "grad_norm": 0.4282758831977844,
135
+ "learning_rate": 4.959688286169851e-05,
136
+ "loss": 0.5676,
137
+ "num_tokens": 60559854.0,
138
+ "step": 80
139
+ },
140
+ {
141
+ "epoch": 0.11596180081855388,
142
+ "grad_norm": 0.4353692829608917,
143
+ "learning_rate": 4.9495570999387685e-05,
144
+ "loss": 0.5613,
145
+ "num_tokens": 64468301.0,
146
+ "step": 85
147
+ },
148
+ {
149
+ "epoch": 0.12278308321964529,
150
+ "grad_norm": 0.4118047058582306,
151
+ "learning_rate": 4.9383056040465276e-05,
152
+ "loss": 0.5793,
153
+ "num_tokens": 68324862.0,
154
+ "step": 90
155
+ },
156
+ {
157
+ "epoch": 0.1296043656207367,
158
+ "grad_norm": 0.36806777119636536,
159
+ "learning_rate": 4.925939529264815e-05,
160
+ "loss": 0.5747,
161
+ "num_tokens": 72145111.0,
162
+ "step": 95
163
+ },
164
+ {
165
+ "epoch": 0.1364256480218281,
166
+ "grad_norm": 0.3960418403148651,
167
+ "learning_rate": 4.9124651740584684e-05,
168
+ "loss": 0.561,
169
+ "num_tokens": 76047530.0,
170
+ "step": 100
171
+ },
172
+ {
173
+ "epoch": 0.1432469304229195,
174
+ "grad_norm": 0.39603474736213684,
175
+ "learning_rate": 4.897889401377447e-05,
176
+ "loss": 0.5629,
177
+ "num_tokens": 80034081.0,
178
+ "step": 105
179
+ },
180
+ {
181
+ "epoch": 0.15006821282401092,
182
+ "grad_norm": 0.34311339259147644,
183
+ "learning_rate": 4.882219635161306e-05,
184
+ "loss": 0.5667,
185
+ "num_tokens": 83777176.0,
186
+ "step": 110
187
+ },
188
+ {
189
+ "epoch": 0.15688949522510232,
190
+ "grad_norm": 0.4239673614501953,
191
+ "learning_rate": 4.865463856557922e-05,
192
+ "loss": 0.5656,
193
+ "num_tokens": 87560813.0,
194
+ "step": 115
195
+ },
196
+ {
197
+ "epoch": 0.16371077762619374,
198
+ "grad_norm": 0.37248337268829346,
199
+ "learning_rate": 4.847630599858426e-05,
200
+ "loss": 0.5547,
201
+ "num_tokens": 91406508.0,
202
+ "step": 120
203
+ },
204
+ {
205
+ "epoch": 0.17053206002728513,
206
+ "grad_norm": 0.3312360346317291,
207
+ "learning_rate": 4.8287289481503954e-05,
208
+ "loss": 0.5616,
209
+ "num_tokens": 95296652.0,
210
+ "step": 125
211
+ },
212
+ {
213
+ "epoch": 0.17735334242837653,
214
+ "grad_norm": 0.3752099871635437,
215
+ "learning_rate": 4.8087685286915276e-05,
216
+ "loss": 0.5569,
217
+ "num_tokens": 99236540.0,
218
+ "step": 130
219
+ },
220
+ {
221
+ "epoch": 0.18417462482946795,
222
+ "grad_norm": 0.3752726912498474,
223
+ "learning_rate": 4.787759508006147e-05,
224
+ "loss": 0.5565,
225
+ "num_tokens": 103070715.0,
226
+ "step": 135
227
+ },
228
+ {
229
+ "epoch": 0.19099590723055934,
230
+ "grad_norm": 0.40951457619667053,
231
+ "learning_rate": 4.765712586707048e-05,
232
+ "loss": 0.5694,
233
+ "num_tokens": 106743213.0,
234
+ "step": 140
235
+ },
236
+ {
237
+ "epoch": 0.19781718963165076,
238
+ "grad_norm": 0.39596056938171387,
239
+ "learning_rate": 4.7426389940453065e-05,
240
+ "loss": 0.5418,
241
+ "num_tokens": 110676628.0,
242
+ "step": 145
243
+ },
244
+ {
245
+ "epoch": 0.20463847203274216,
246
+ "grad_norm": 0.4065423309803009,
247
+ "learning_rate": 4.718550482190837e-05,
248
+ "loss": 0.5578,
249
+ "num_tokens": 114351641.0,
250
+ "step": 150
251
+ },
252
+ {
253
+ "epoch": 0.21145975443383355,
254
+ "grad_norm": 0.3387225568294525,
255
+ "learning_rate": 4.6934593202466127e-05,
256
+ "loss": 0.5424,
257
+ "num_tokens": 118270253.0,
258
+ "step": 155
259
+ },
260
+ {
261
+ "epoch": 0.21828103683492497,
262
+ "grad_norm": 0.3324713110923767,
263
+ "learning_rate": 4.6673782879995896e-05,
264
+ "loss": 0.5511,
265
+ "num_tokens": 122130553.0,
266
+ "step": 160
267
+ },
268
+ {
269
+ "epoch": 0.22510231923601637,
270
+ "grad_norm": 0.34876254200935364,
271
+ "learning_rate": 4.640320669411526e-05,
272
+ "loss": 0.5539,
273
+ "num_tokens": 125907714.0,
274
+ "step": 165
275
+ },
276
+ {
277
+ "epoch": 0.23192360163710776,
278
+ "grad_norm": 0.3520093858242035,
279
+ "learning_rate": 4.612300245853004e-05,
280
+ "loss": 0.5473,
281
+ "num_tokens": 129778594.0,
282
+ "step": 170
283
+ },
284
+ {
285
+ "epoch": 0.23874488403819918,
286
+ "grad_norm": 0.4421618580818176,
287
+ "learning_rate": 4.5833312890841085e-05,
288
+ "loss": 0.562,
289
+ "num_tokens": 133567859.0,
290
+ "step": 175
291
+ },
292
+ {
293
+ "epoch": 0.24556616643929058,
294
+ "grad_norm": 0.3951200544834137,
295
+ "learning_rate": 4.553428553985329e-05,
296
+ "loss": 0.5416,
297
+ "num_tokens": 137318470.0,
298
+ "step": 180
299
+ },
300
+ {
301
+ "epoch": 0.252387448840382,
302
+ "grad_norm": 0.37818825244903564,
303
+ "learning_rate": 4.522607271042399e-05,
304
+ "loss": 0.5366,
305
+ "num_tokens": 140986549.0,
306
+ "step": 185
307
+ },
308
+ {
309
+ "epoch": 0.2592087312414734,
310
+ "grad_norm": 0.43682247400283813,
311
+ "learning_rate": 4.490883138588882e-05,
312
+ "loss": 0.5479,
313
+ "num_tokens": 144921526.0,
314
+ "step": 190
315
+ },
316
+ {
317
+ "epoch": 0.2660300136425648,
318
+ "grad_norm": 0.32613101601600647,
319
+ "learning_rate": 4.458272314810479e-05,
320
+ "loss": 0.5358,
321
+ "num_tokens": 148719256.0,
322
+ "step": 195
323
+ },
324
+ {
325
+ "epoch": 0.2728512960436562,
326
+ "grad_norm": 0.31748515367507935,
327
+ "learning_rate": 4.4247914095151086e-05,
328
+ "loss": 0.5457,
329
+ "num_tokens": 152583124.0,
330
+ "step": 200
331
+ },
332
+ {
333
+ "epoch": 0.27967257844474763,
334
+ "grad_norm": 0.3875013589859009,
335
+ "learning_rate": 4.390457475672966e-05,
336
+ "loss": 0.5393,
337
+ "num_tokens": 156451358.0,
338
+ "step": 205
339
+ },
340
+ {
341
+ "epoch": 0.286493860845839,
342
+ "grad_norm": 0.33466753363609314,
343
+ "learning_rate": 4.35528800073086e-05,
344
+ "loss": 0.5408,
345
+ "num_tokens": 160195441.0,
346
+ "step": 210
347
+ },
348
+ {
349
+ "epoch": 0.2933151432469304,
350
+ "grad_norm": 0.3655596077442169,
351
+ "learning_rate": 4.31930089770526e-05,
352
+ "loss": 0.5442,
353
+ "num_tokens": 164130734.0,
354
+ "step": 215
355
+ },
356
+ {
357
+ "epoch": 0.30013642564802184,
358
+ "grad_norm": 0.35074111819267273,
359
+ "learning_rate": 4.282514496058582e-05,
360
+ "loss": 0.5236,
361
+ "num_tokens": 167974083.0,
362
+ "step": 220
363
+ },
364
+ {
365
+ "epoch": 0.3069577080491132,
366
+ "grad_norm": 0.3069048225879669,
367
+ "learning_rate": 4.24494753236337e-05,
368
+ "loss": 0.5363,
369
+ "num_tokens": 171877159.0,
370
+ "step": 225
371
+ },
372
+ {
373
+ "epoch": 0.31377899045020463,
374
+ "grad_norm": 0.3370104134082794,
375
+ "learning_rate": 4.2066191407591125e-05,
376
+ "loss": 0.5319,
377
+ "num_tokens": 175825874.0,
378
+ "step": 230
379
+ },
380
+ {
381
+ "epoch": 0.32060027285129605,
382
+ "grad_norm": 0.30355367064476013,
383
+ "learning_rate": 4.1675488432065785e-05,
384
+ "loss": 0.5242,
385
+ "num_tokens": 179651504.0,
386
+ "step": 235
387
+ },
388
+ {
389
+ "epoch": 0.3274215552523875,
390
+ "grad_norm": 0.29714441299438477,
391
+ "learning_rate": 4.127756539544609e-05,
392
+ "loss": 0.5368,
393
+ "num_tokens": 183474332.0,
394
+ "step": 240
395
+ },
396
+ {
397
+ "epoch": 0.33424283765347884,
398
+ "grad_norm": 0.34279632568359375,
399
+ "learning_rate": 4.087262497354452e-05,
400
+ "loss": 0.5453,
401
+ "num_tokens": 187421939.0,
402
+ "step": 245
403
+ },
404
+ {
405
+ "epoch": 0.34106412005457026,
406
+ "grad_norm": 0.3006781339645386,
407
+ "learning_rate": 4.046087341636789e-05,
408
+ "loss": 0.5278,
409
+ "num_tokens": 191229072.0,
410
+ "step": 250
411
+ },
412
+ {
413
+ "epoch": 0.3478854024556617,
414
+ "grad_norm": 0.30111509561538696,
415
+ "learning_rate": 4.0042520443067176e-05,
416
+ "loss": 0.529,
417
+ "num_tokens": 195078991.0,
418
+ "step": 255
419
+ },
420
+ {
421
+ "epoch": 0.35470668485675305,
422
+ "grad_norm": 0.3227461576461792,
423
+ "learning_rate": 3.961777913512035e-05,
424
+ "loss": 0.5181,
425
+ "num_tokens": 198920961.0,
426
+ "step": 260
427
+ },
428
+ {
429
+ "epoch": 0.3615279672578445,
430
+ "grad_norm": 0.30752789974212646,
431
+ "learning_rate": 3.9186865827802724e-05,
432
+ "loss": 0.5377,
433
+ "num_tokens": 202602950.0,
434
+ "step": 265
435
+ },
436
+ {
437
+ "epoch": 0.3683492496589359,
438
+ "grad_norm": 0.32034164667129517,
439
+ "learning_rate": 3.875e-05,
440
+ "loss": 0.5265,
441
+ "num_tokens": 206455454.0,
442
+ "step": 270
443
+ },
444
+ {
445
+ "epoch": 0.37517053206002726,
446
+ "grad_norm": 0.28475409746170044,
447
+ "learning_rate": 3.830740416242014e-05,
448
+ "loss": 0.5223,
449
+ "num_tokens": 210274230.0,
450
+ "step": 275
451
+ },
452
+ {
453
+ "epoch": 0.3819918144611187,
454
+ "grad_norm": 0.30314069986343384,
455
+ "learning_rate": 3.7859303744261064e-05,
456
+ "loss": 0.5281,
457
+ "num_tokens": 213944648.0,
458
+ "step": 280
459
+ },
460
+ {
461
+ "epoch": 0.3888130968622101,
462
+ "grad_norm": 0.28140079975128174,
463
+ "learning_rate": 3.740592697839185e-05,
464
+ "loss": 0.5328,
465
+ "num_tokens": 217821264.0,
466
+ "step": 285
467
+ },
468
+ {
469
+ "epoch": 0.3956343792633015,
470
+ "grad_norm": 0.26929807662963867,
471
+ "learning_rate": 3.694750478510596e-05,
472
+ "loss": 0.5284,
473
+ "num_tokens": 221728874.0,
474
+ "step": 290
475
+ },
476
+ {
477
+ "epoch": 0.4024556616643929,
478
+ "grad_norm": 0.26649391651153564,
479
+ "learning_rate": 3.648427065450555e-05,
480
+ "loss": 0.5196,
481
+ "num_tokens": 225494536.0,
482
+ "step": 295
483
+ },
484
+ {
485
+ "epoch": 0.4092769440654843,
486
+ "grad_norm": 0.29355040192604065,
487
+ "learning_rate": 3.601646052757707e-05,
488
+ "loss": 0.5188,
489
+ "num_tokens": 229370762.0,
490
+ "step": 300
491
+ },
492
+ {
493
+ "epoch": 0.41609822646657574,
494
+ "grad_norm": 0.3135109543800354,
495
+ "learning_rate": 3.55443126760184e-05,
496
+ "loss": 0.5343,
497
+ "num_tokens": 233272121.0,
498
+ "step": 305
499
+ },
500
+ {
501
+ "epoch": 0.4229195088676671,
502
+ "grad_norm": 0.3388509154319763,
503
+ "learning_rate": 3.506806758087894e-05,
504
+ "loss": 0.5319,
505
+ "num_tokens": 237043352.0,
506
+ "step": 310
507
+ },
508
+ {
509
+ "epoch": 0.4297407912687585,
510
+ "grad_norm": 0.30362528562545776,
511
+ "learning_rate": 3.458796781007437e-05,
512
+ "loss": 0.5266,
513
+ "num_tokens": 240757499.0,
514
+ "step": 315
515
+ },
516
+ {
517
+ "epoch": 0.43656207366984995,
518
+ "grad_norm": 0.28180354833602905,
519
+ "learning_rate": 3.410425789483854e-05,
520
+ "loss": 0.527,
521
+ "num_tokens": 244605555.0,
522
+ "step": 320
523
+ },
524
+ {
525
+ "epoch": 0.4433833560709413,
526
+ "grad_norm": 0.28672918677330017,
527
+ "learning_rate": 3.3617184205175304e-05,
528
+ "loss": 0.5334,
529
+ "num_tokens": 248382993.0,
530
+ "step": 325
531
+ },
532
+ {
533
+ "epoch": 0.45020463847203274,
534
+ "grad_norm": 0.33284738659858704,
535
+ "learning_rate": 3.312699482437392e-05,
536
+ "loss": 0.5206,
537
+ "num_tokens": 252233466.0,
538
+ "step": 330
539
+ },
540
+ {
541
+ "epoch": 0.45702592087312416,
542
+ "grad_norm": 0.28790685534477234,
543
+ "learning_rate": 3.263393942265168e-05,
544
+ "loss": 0.5273,
545
+ "num_tokens": 256038440.0,
546
+ "step": 335
547
+ },
548
+ {
549
+ "epoch": 0.4638472032742155,
550
+ "grad_norm": 0.25374555587768555,
551
+ "learning_rate": 3.213826912998838e-05,
552
+ "loss": 0.5197,
553
+ "num_tokens": 260071254.0,
554
+ "step": 340
555
+ },
556
+ {
557
+ "epoch": 0.47066848567530695,
558
+ "grad_norm": 0.26900210976600647,
559
+ "learning_rate": 3.164023640821719e-05,
560
+ "loss": 0.513,
561
+ "num_tokens": 263897078.0,
562
+ "step": 345
563
+ },
564
+ {
565
+ "epoch": 0.47748976807639837,
566
+ "grad_norm": 0.27621471881866455,
567
+ "learning_rate": 3.114009492243721e-05,
568
+ "loss": 0.5214,
569
+ "num_tokens": 267702266.0,
570
+ "step": 350
571
+ },
572
+ {
573
+ "epoch": 0.4843110504774898,
574
+ "grad_norm": 0.28301048278808594,
575
+ "learning_rate": 3.063809941181321e-05,
576
+ "loss": 0.531,
577
+ "num_tokens": 271571889.0,
578
+ "step": 355
579
+ },
580
+ {
581
+ "epoch": 0.49113233287858116,
582
+ "grad_norm": 0.28320661187171936,
583
+ "learning_rate": 3.0134505559828203e-05,
584
+ "loss": 0.5348,
585
+ "num_tokens": 275444208.0,
586
+ "step": 360
587
+ },
588
+ {
589
+ "epoch": 0.4979536152796726,
590
+ "grad_norm": 0.2856563925743103,
591
+ "learning_rate": 2.9629569864055125e-05,
592
+ "loss": 0.5128,
593
+ "num_tokens": 279090959.0,
594
+ "step": 365
595
+ },
596
+ {
597
+ "epoch": 0.504774897680764,
598
+ "grad_norm": 0.2746957540512085,
599
+ "learning_rate": 2.9123549505513868e-05,
600
+ "loss": 0.5148,
601
+ "num_tokens": 283042351.0,
602
+ "step": 370
603
+ },
604
+ {
605
+ "epoch": 0.5115961800818554,
606
+ "grad_norm": 0.27719607949256897,
607
+ "learning_rate": 2.8616702217680134e-05,
608
+ "loss": 0.5228,
609
+ "num_tokens": 286947035.0,
610
+ "step": 375
611
+ },
612
+ {
613
+ "epoch": 0.5184174624829468,
614
+ "grad_norm": 0.27276405692100525,
615
+ "learning_rate": 2.810928615521303e-05,
616
+ "loss": 0.5095,
617
+ "num_tokens": 290627149.0,
618
+ "step": 380
619
+ },
620
+ {
621
+ "epoch": 0.5252387448840382,
622
+ "grad_norm": 0.27591395378112793,
623
+ "learning_rate": 2.7601559762468022e-05,
624
+ "loss": 0.5187,
625
+ "num_tokens": 294445695.0,
626
+ "step": 385
627
+ },
628
+ {
629
+ "epoch": 0.5320600272851296,
630
+ "grad_norm": 0.27681204676628113,
631
+ "learning_rate": 2.7093781641862387e-05,
632
+ "loss": 0.5212,
633
+ "num_tokens": 298235939.0,
634
+ "step": 390
635
+ },
636
+ {
637
+ "epoch": 0.538881309686221,
638
+ "grad_norm": 0.2667984366416931,
639
+ "learning_rate": 2.658621042216021e-05,
640
+ "loss": 0.5054,
641
+ "num_tokens": 301940350.0,
642
+ "step": 395
643
+ },
644
+ {
645
+ "epoch": 0.5457025920873124,
646
+ "grad_norm": 0.6714840531349182,
647
+ "learning_rate": 2.6079104626743845e-05,
648
+ "loss": 0.5256,
649
+ "num_tokens": 305677252.0,
650
+ "step": 400
651
+ },
652
+ {
653
+ "epoch": 0.5525238744884038,
654
+ "grad_norm": 0.2727600634098053,
655
+ "learning_rate": 2.5572722541939113e-05,
656
+ "loss": 0.5248,
657
+ "num_tokens": 309492997.0,
658
+ "step": 405
659
+ },
660
+ {
661
+ "epoch": 0.5593451568894953,
662
+ "grad_norm": 0.28910964727401733,
663
+ "learning_rate": 2.5067322085461315e-05,
664
+ "loss": 0.5102,
665
+ "num_tokens": 313260754.0,
666
+ "step": 410
667
+ },
668
+ {
669
+ "epoch": 0.5661664392905866,
670
+ "grad_norm": 0.30671924352645874,
671
+ "learning_rate": 2.4563160675048846e-05,
672
+ "loss": 0.5153,
673
+ "num_tokens": 317054734.0,
674
+ "step": 415
675
+ },
676
+ {
677
+ "epoch": 0.572987721691678,
678
+ "grad_norm": 0.2649187743663788,
679
+ "learning_rate": 2.406049509735156e-05,
680
+ "loss": 0.5151,
681
+ "num_tokens": 320934035.0,
682
+ "step": 420
683
+ },
684
+ {
685
+ "epoch": 0.5798090040927695,
686
+ "grad_norm": 0.28437790274620056,
687
+ "learning_rate": 2.355958137714056e-05,
688
+ "loss": 0.5106,
689
+ "num_tokens": 324620533.0,
690
+ "step": 425
691
+ },
692
+ {
693
+ "epoch": 0.5866302864938608,
694
+ "grad_norm": 0.2717227041721344,
695
+ "learning_rate": 2.3060674646906004e-05,
696
+ "loss": 0.5152,
697
+ "num_tokens": 328343683.0,
698
+ "step": 430
699
+ },
700
+ {
701
+ "epoch": 0.5934515688949522,
702
+ "grad_norm": 0.3002876341342926,
703
+ "learning_rate": 2.2564029016909416e-05,
704
+ "loss": 0.5046,
705
+ "num_tokens": 332273995.0,
706
+ "step": 435
707
+ },
708
+ {
709
+ "epoch": 0.6002728512960437,
710
+ "grad_norm": 0.26823899149894714,
711
+ "learning_rate": 2.2069897445756627e-05,
712
+ "loss": 0.5026,
713
+ "num_tokens": 336096910.0,
714
+ "step": 440
715
+ },
716
+ {
717
+ "epoch": 0.607094133697135,
718
+ "grad_norm": 0.26657503843307495,
719
+ "learning_rate": 2.1578531611557322e-05,
720
+ "loss": 0.5156,
721
+ "num_tokens": 339854518.0,
722
+ "step": 445
723
+ },
724
+ {
725
+ "epoch": 0.6139154160982264,
726
+ "grad_norm": 0.2396160513162613,
727
+ "learning_rate": 2.109018178373675e-05,
728
+ "loss": 0.5144,
729
+ "num_tokens": 343729017.0,
730
+ "step": 450
731
+ },
732
+ {
733
+ "epoch": 0.6207366984993179,
734
+ "grad_norm": 0.25998055934906006,
735
+ "learning_rate": 2.0605096695564973e-05,
736
+ "loss": 0.5179,
737
+ "num_tokens": 347564883.0,
738
+ "step": 455
739
+ },
740
+ {
741
+ "epoch": 0.6275579809004093,
742
+ "grad_norm": 0.2405814677476883,
743
+ "learning_rate": 2.0123523417468466e-05,
744
+ "loss": 0.5112,
745
+ "num_tokens": 351296309.0,
746
+ "step": 460
747
+ },
748
+ {
749
+ "epoch": 0.6343792633015006,
750
+ "grad_norm": 0.23936094343662262,
751
+ "learning_rate": 1.9645707231188742e-05,
752
+ "loss": 0.5055,
753
+ "num_tokens": 355112109.0,
754
+ "step": 465
755
+ },
756
+ {
757
+ "epoch": 0.6412005457025921,
758
+ "grad_norm": 0.23796042799949646,
759
+ "learning_rate": 1.9171891504851925e-05,
760
+ "loss": 0.5243,
761
+ "num_tokens": 358904864.0,
762
+ "step": 470
763
+ },
764
+ {
765
+ "epoch": 0.6480218281036835,
766
+ "grad_norm": 0.2545931339263916,
767
+ "learning_rate": 1.8702317569013094e-05,
768
+ "loss": 0.5002,
769
+ "num_tokens": 362651560.0,
770
+ "step": 475
771
+ },
772
+ {
773
+ "epoch": 0.654843110504775,
774
+ "grad_norm": 0.23106758296489716,
775
+ "learning_rate": 1.8237224593738327e-05,
776
+ "loss": 0.5025,
777
+ "num_tokens": 366319152.0,
778
+ "step": 480
779
+ },
780
+ {
781
+ "epoch": 0.6616643929058663,
782
+ "grad_norm": 0.22513054311275482,
783
+ "learning_rate": 1.7776849466787223e-05,
784
+ "loss": 0.5168,
785
+ "num_tokens": 370176088.0,
786
+ "step": 485
787
+ },
788
+ {
789
+ "epoch": 0.6684856753069577,
790
+ "grad_norm": 0.23235899209976196,
791
+ "learning_rate": 1.7321426672957896e-05,
792
+ "loss": 0.5022,
793
+ "num_tokens": 374011109.0,
794
+ "step": 490
795
+ },
796
+ {
797
+ "epoch": 0.6753069577080492,
798
+ "grad_norm": 0.2550266683101654,
799
+ "learning_rate": 1.6871188174655787e-05,
800
+ "loss": 0.4954,
801
+ "num_tokens": 377769452.0,
802
+ "step": 495
803
+ },
804
+ {
805
+ "epoch": 0.6821282401091405,
806
+ "grad_norm": 0.23668882250785828,
807
+ "learning_rate": 1.6426363293747334e-05,
808
+ "loss": 0.4998,
809
+ "num_tokens": 381536770.0,
810
+ "step": 500
811
+ },
812
+ {
813
+ "epoch": 0.6889495225102319,
814
+ "grad_norm": 0.24165432155132294,
815
+ "learning_rate": 1.598717859475846e-05,
816
+ "loss": 0.5084,
817
+ "num_tokens": 385264947.0,
818
+ "step": 505
819
+ },
820
+ {
821
+ "epoch": 0.6957708049113234,
822
+ "grad_norm": 0.235661581158638,
823
+ "learning_rate": 1.5553857769477553e-05,
824
+ "loss": 0.5052,
825
+ "num_tokens": 389008667.0,
826
+ "step": 510
827
+ },
828
+ {
829
+ "epoch": 0.7025920873124147,
830
+ "grad_norm": 0.2295641452074051,
831
+ "learning_rate": 1.5126621523021518e-05,
832
+ "loss": 0.5097,
833
+ "num_tokens": 392845122.0,
834
+ "step": 515
835
+ },
836
+ {
837
+ "epoch": 0.7094133697135061,
838
+ "grad_norm": 0.259378582239151,
839
+ "learning_rate": 1.4705687461423209e-05,
840
+ "loss": 0.522,
841
+ "num_tokens": 396569410.0,
842
+ "step": 520
843
+ },
844
+ {
845
+ "epoch": 0.7162346521145976,
846
+ "grad_norm": 0.24214191734790802,
847
+ "learning_rate": 1.4291269980797139e-05,
848
+ "loss": 0.5062,
849
+ "num_tokens": 400328978.0,
850
+ "step": 525
851
+ },
852
+ {
853
+ "epoch": 0.723055934515689,
854
+ "grad_norm": 0.22774960100650787,
855
+ "learning_rate": 1.3883580158140291e-05,
856
+ "loss": 0.5002,
857
+ "num_tokens": 404085025.0,
858
+ "step": 530
859
+ },
860
+ {
861
+ "epoch": 0.7298772169167803,
862
+ "grad_norm": 0.21534228324890137,
863
+ "learning_rate": 1.3482825643823293e-05,
864
+ "loss": 0.5058,
865
+ "num_tokens": 407976499.0,
866
+ "step": 535
867
+ },
868
+ {
869
+ "epoch": 0.7366984993178718,
870
+ "grad_norm": 0.21766649186611176,
871
+ "learning_rate": 1.3089210555827086e-05,
872
+ "loss": 0.5116,
873
+ "num_tokens": 411774021.0,
874
+ "step": 540
875
+ },
876
+ {
877
+ "epoch": 0.7435197817189632,
878
+ "grad_norm": 0.21238680183887482,
879
+ "learning_rate": 1.270293537577855e-05,
880
+ "loss": 0.5145,
881
+ "num_tokens": 415754417.0,
882
+ "step": 545
883
+ },
884
+ {
885
+ "epoch": 0.7503410641200545,
886
+ "grad_norm": 0.22271254658699036,
887
+ "learning_rate": 1.232419684683844e-05,
888
+ "loss": 0.4992,
889
+ "num_tokens": 419428701.0,
890
+ "step": 550
891
+ },
892
+ {
893
+ "epoch": 0.757162346521146,
894
+ "grad_norm": 0.22422295808792114,
895
+ "learning_rate": 1.1953187873493303e-05,
896
+ "loss": 0.4998,
897
+ "num_tokens": 423056747.0,
898
+ "step": 555
899
+ },
900
+ {
901
+ "epoch": 0.7639836289222374,
902
+ "grad_norm": 0.2289811670780182,
903
+ "learning_rate": 1.1590097423302684e-05,
904
+ "loss": 0.4957,
905
+ "num_tokens": 426771236.0,
906
+ "step": 560
907
+ },
908
+ {
909
+ "epoch": 0.7708049113233287,
910
+ "grad_norm": 0.21832554042339325,
911
+ "learning_rate": 1.1235110430651421e-05,
912
+ "loss": 0.4956,
913
+ "num_tokens": 430648058.0,
914
+ "step": 565
915
+ },
916
+ {
917
+ "epoch": 0.7776261937244202,
918
+ "grad_norm": 0.23512613773345947,
919
+ "learning_rate": 1.0888407702556284e-05,
920
+ "loss": 0.4995,
921
+ "num_tokens": 434432023.0,
922
+ "step": 570
923
+ },
924
+ {
925
+ "epoch": 0.7844474761255116,
926
+ "grad_norm": 0.2369619607925415,
927
+ "learning_rate": 1.0550165826574766e-05,
928
+ "loss": 0.4993,
929
+ "num_tokens": 438355231.0,
930
+ "step": 575
931
+ },
932
+ {
933
+ "epoch": 0.791268758526603,
934
+ "grad_norm": 0.23256513476371765,
935
+ "learning_rate": 1.0220557080862985e-05,
936
+ "loss": 0.5145,
937
+ "num_tokens": 442388385.0,
938
+ "step": 580
939
+ },
940
+ {
941
+ "epoch": 0.7980900409276944,
942
+ "grad_norm": 0.2141689658164978,
943
+ "learning_rate": 9.899749346428556e-06,
944
+ "loss": 0.5017,
945
+ "num_tokens": 446189045.0,
946
+ "step": 585
947
+ },
948
+ {
949
+ "epoch": 0.8049113233287858,
950
+ "grad_norm": 0.2098773866891861,
951
+ "learning_rate": 9.587906021623016e-06,
952
+ "loss": 0.5158,
953
+ "num_tokens": 450018716.0,
954
+ "step": 590
955
+ },
956
+ {
957
+ "epoch": 0.8117326057298773,
958
+ "grad_norm": 0.23991893231868744,
959
+ "learning_rate": 9.28518593891749e-06,
960
+ "loss": 0.501,
961
+ "num_tokens": 453809691.0,
962
+ "step": 595
963
+ },
964
+ {
965
+ "epoch": 0.8185538881309686,
966
+ "grad_norm": 0.22266173362731934,
967
+ "learning_rate": 8.99174328400385e-06,
968
+ "loss": 0.4993,
969
+ "num_tokens": 457645977.0,
970
+ "step": 600
971
+ },
972
+ {
973
+ "epoch": 0.82537517053206,
974
+ "grad_norm": 0.2336045503616333,
975
+ "learning_rate": 8.707727517262697e-06,
976
+ "loss": 0.5047,
977
+ "num_tokens": 461370305.0,
978
+ "step": 605
979
+ },
980
+ {
981
+ "epoch": 0.8321964529331515,
982
+ "grad_norm": 0.20964659750461578,
983
+ "learning_rate": 8.433283297638053e-06,
984
+ "loss": 0.4989,
985
+ "num_tokens": 465282670.0,
986
+ "step": 610
987
+ },
988
+ {
989
+ "epoch": 0.8390177353342428,
990
+ "grad_norm": 0.21320775151252747,
991
+ "learning_rate": 8.168550408957632e-06,
992
+ "loss": 0.4967,
993
+ "num_tokens": 469094631.0,
994
+ "step": 615
995
+ },
996
+ {
997
+ "epoch": 0.8458390177353342,
998
+ "grad_norm": 0.22750438749790192,
999
+ "learning_rate": 7.91366368873613e-06,
1000
+ "loss": 0.4941,
1001
+ "num_tokens": 472868071.0,
1002
+ "step": 620
1003
+ },
1004
+ {
1005
+ "epoch": 0.8526603001364257,
1006
+ "grad_norm": 0.225652813911438,
1007
+ "learning_rate": 7.66875295949791e-06,
1008
+ "loss": 0.5101,
1009
+ "num_tokens": 476693164.0,
1010
+ "step": 625
1011
+ },
1012
+ {
1013
+ "epoch": 0.859481582537517,
1014
+ "grad_norm": 0.2049553096294403,
1015
+ "learning_rate": 7.4339429626539e-06,
1016
+ "loss": 0.5098,
1017
+ "num_tokens": 480634992.0,
1018
+ "step": 630
1019
+ },
1020
+ {
1021
+ "epoch": 0.8663028649386084,
1022
+ "grad_norm": 0.2328362911939621,
1023
+ "learning_rate": 7.2093532949665715e-06,
1024
+ "loss": 0.5077,
1025
+ "num_tokens": 484452349.0,
1026
+ "step": 635
1027
+ },
1028
+ {
1029
+ "epoch": 0.8731241473396999,
1030
+ "grad_norm": 0.20552393794059753,
1031
+ "learning_rate": 6.995098347635173e-06,
1032
+ "loss": 0.493,
1033
+ "num_tokens": 488334317.0,
1034
+ "step": 640
1035
+ },
1036
+ {
1037
+ "epoch": 0.8799454297407913,
1038
+ "grad_norm": 0.20875284075737,
1039
+ "learning_rate": 6.791287248032431e-06,
1040
+ "loss": 0.4962,
1041
+ "num_tokens": 492162119.0,
1042
+ "step": 645
1043
+ },
1044
+ {
1045
+ "epoch": 0.8867667121418826,
1046
+ "grad_norm": 0.21407166123390198,
1047
+ "learning_rate": 6.598023804122194e-06,
1048
+ "loss": 0.5035,
1049
+ "num_tokens": 496169094.0,
1050
+ "step": 650
1051
+ },
1052
+ {
1053
+ "epoch": 0.8935879945429741,
1054
+ "grad_norm": 0.20252534747123718,
1055
+ "learning_rate": 6.415406451586528e-06,
1056
+ "loss": 0.499,
1057
+ "num_tokens": 500122283.0,
1058
+ "step": 655
1059
+ },
1060
+ {
1061
+ "epoch": 0.9004092769440655,
1062
+ "grad_norm": 0.20611464977264404,
1063
+ "learning_rate": 6.243528203689025e-06,
1064
+ "loss": 0.5028,
1065
+ "num_tokens": 504062446.0,
1066
+ "step": 660
1067
+ },
1068
+ {
1069
+ "epoch": 0.9072305593451568,
1070
+ "grad_norm": 0.2153824418783188,
1071
+ "learning_rate": 6.0824766039e-06,
1072
+ "loss": 0.499,
1073
+ "num_tokens": 507853633.0,
1074
+ "step": 665
1075
+ },
1076
+ {
1077
+ "epoch": 0.9140518417462483,
1078
+ "grad_norm": 0.20805135369300842,
1079
+ "learning_rate": 5.932333681307571e-06,
1080
+ "loss": 0.5058,
1081
+ "num_tokens": 511714869.0,
1082
+ "step": 670
1083
+ },
1084
+ {
1085
+ "epoch": 0.9208731241473397,
1086
+ "grad_norm": 0.2085290253162384,
1087
+ "learning_rate": 5.793175908837471e-06,
1088
+ "loss": 0.4964,
1089
+ "num_tokens": 515451192.0,
1090
+ "step": 675
1091
+ },
1092
+ {
1093
+ "epoch": 0.927694406548431,
1094
+ "grad_norm": 0.20749785006046295,
1095
+ "learning_rate": 5.665074164302742e-06,
1096
+ "loss": 0.506,
1097
+ "num_tokens": 519195745.0,
1098
+ "step": 680
1099
+ },
1100
+ {
1101
+ "epoch": 0.9345156889495225,
1102
+ "grad_norm": 0.19413329660892487,
1103
+ "learning_rate": 5.548093694303275e-06,
1104
+ "loss": 0.4915,
1105
+ "num_tokens": 523017567.0,
1106
+ "step": 685
1107
+ },
1108
+ {
1109
+ "epoch": 0.9413369713506139,
1110
+ "grad_norm": 0.21901412308216095,
1111
+ "learning_rate": 5.442294080993446e-06,
1112
+ "loss": 0.5056,
1113
+ "num_tokens": 526884969.0,
1114
+ "step": 690
1115
+ },
1116
+ {
1117
+ "epoch": 0.9481582537517054,
1118
+ "grad_norm": 0.20552243292331696,
1119
+ "learning_rate": 5.347729211734919e-06,
1120
+ "loss": 0.503,
1121
+ "num_tokens": 530678785.0,
1122
+ "step": 695
1123
+ },
1124
+ {
1125
+ "epoch": 0.9549795361527967,
1126
+ "grad_norm": 0.20113253593444824,
1127
+ "learning_rate": 5.264447251649954e-06,
1128
+ "loss": 0.5055,
1129
+ "num_tokens": 534460009.0,
1130
+ "step": 700
1131
+ },
1132
+ {
1133
+ "epoch": 0.9618008185538881,
1134
+ "grad_norm": 0.2043074369430542,
1135
+ "learning_rate": 5.192490619089267e-06,
1136
+ "loss": 0.4897,
1137
+ "num_tokens": 538338585.0,
1138
+ "step": 705
1139
+ },
1140
+ {
1141
+ "epoch": 0.9686221009549796,
1142
+ "grad_norm": 0.2070448249578476,
1143
+ "learning_rate": 5.1318959640269095e-06,
1144
+ "loss": 0.5002,
1145
+ "num_tokens": 542019550.0,
1146
+ "step": 710
1147
+ },
1148
+ {
1149
+ "epoch": 0.975443383356071,
1150
+ "grad_norm": 0.21117159724235535,
1151
+ "learning_rate": 5.082694149393189e-06,
1152
+ "loss": 0.5111,
1153
+ "num_tokens": 545767802.0,
1154
+ "step": 715
1155
+ },
1156
+ {
1157
+ "epoch": 0.9822646657571623,
1158
+ "grad_norm": 0.20505015552043915,
1159
+ "learning_rate": 5.044910235355121e-06,
1160
+ "loss": 0.497,
1161
+ "num_tokens": 549561878.0,
1162
+ "step": 720
1163
+ },
1164
+ {
1165
+ "epoch": 0.9890859481582538,
1166
+ "grad_norm": 0.19267314672470093,
1167
+ "learning_rate": 5.0185634665524255e-06,
1168
+ "loss": 0.493,
1169
+ "num_tokens": 553402412.0,
1170
+ "step": 725
1171
+ },
1172
+ {
1173
+ "epoch": 0.9959072305593452,
1174
+ "grad_norm": 0.18925239145755768,
1175
+ "learning_rate": 5.003667262295572e-06,
1176
+ "loss": 0.5012,
1177
+ "num_tokens": 557174057.0,
1178
+ "step": 730
1179
+ },
1180
+ {
1181
+ "epoch": 1.0,
1182
+ "num_tokens": 559480552.0,
1183
+ "step": 733,
1184
+ "total_flos": 1.0371853292601868e+19,
1185
+ "train_loss": 0.5346580615310357,
1186
+ "train_runtime": 5110.5186,
1187
+ "train_samples_per_second": 18.341,
1188
+ "train_steps_per_second": 0.143
1189
+ }
1190
+ ],
1191
+ "logging_steps": 5,
1192
+ "max_steps": 733,
1193
+ "num_input_tokens_seen": 0,
1194
+ "num_train_epochs": 1,
1195
+ "save_steps": 100,
1196
+ "stateful_callbacks": {
1197
+ "TrainerControl": {
1198
+ "args": {
1199
+ "should_epoch_stop": false,
1200
+ "should_evaluate": false,
1201
+ "should_log": false,
1202
+ "should_save": true,
1203
+ "should_training_stop": true
1204
+ },
1205
+ "attributes": {}
1206
+ }
1207
+ },
1208
+ "total_flos": 1.0371853292601868e+19,
1209
+ "train_batch_size": 16,
1210
+ "trial_name": null,
1211
+ "trial_params": null
1212
+ }