qingyangzhang committed · Commit 4b592b0 · verified · 1 Parent(s): 5e28e06

Model save
README.md ADDED
@@ -0,0 +1,67 @@
+ ---
+ library_name: transformers
+ model_name: Qwen2.5-3B-Open-R1-GRPO-Self-TQA
+ tags:
+ - generated_from_trainer
+ - trl
+ - grpo
+ licence: license
+ ---
+
+ # Model Card for Qwen2.5-3B-Open-R1-GRPO-Self-TQA
+
+ This model is a fine-tuned version of [None](https://huggingface.co/None).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="qingyangzhang/Qwen2.5-3B-Open-R1-GRPO-Self-TQA", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/zqyoung1127-tianjin-university/huggingface/runs/yloacszm)
+
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
+ ### Framework versions
+
+ - TRL: 0.14.0
+ - Transformers: 4.48.3
+ - Pytorch: 2.5.1+cu124
+ - Datasets: 3.1.0
+ - Tokenizers: 0.21.0
+
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{zhihong2024deepseekmath,
+     title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+     author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+     year = 2024,
+     eprint = {arXiv:2402.03300},
+ }
+
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title = {{TRL: Transformer Reinforcement Learning}},
+     author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+     year = 2020,
+     journal = {GitHub repository},
+     publisher = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
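
The card above says only that the model was trained with GRPO via TRL. For orientation, here is a minimal sketch of what a TRL 0.14-style GRPO run looks like; the base model, dataset, and toy reward function below are illustrative assumptions, not the recorded setup (the card lists the base model as None, and the actual run used a semantic-entropy reward, per the `rewards/semantic_entropy` entries in trainer_state.json below):

```python
# Hypothetical GRPO run in the style of TRL 0.14; the base model, dataset,
# and reward function are placeholders, NOT the setup used for this commit.
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

def reward_len(completions, **kwargs):
    # Toy reward favoring shorter completions; a stand-in for the
    # semantic-entropy reward logged in trainer_state.json.
    return [-float(len(c)) for c in completions]

dataset = load_dataset("trl-lib/tldr", split="train")  # placeholder dataset

args = GRPOConfig(
    output_dir="Qwen2.5-3B-Open-R1-GRPO-Self-TQA",
    learning_rate=2e-6,   # peak LR seen in the training log below
    num_train_epochs=1,   # matches num_train_epochs in trainer_state.json
    logging_steps=1,
    save_steps=10,
)

trainer = GRPOTrainer(
    model="Qwen/Qwen2.5-3B-Instruct",  # assumed base; the card records None
    reward_funcs=reward_len,
    args=args,
    train_dataset=dataset,
)
trainer.train()
```

Since `GRPOConfig` extends `TrainingArguments`, the usual logging and checkpoint knobs apply; the `logging_steps=1` and `save_steps=10` values above mirror what trainer_state.json below records for the actual run.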
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "total_flos": 0.0,
+   "train_loss": 0.0001855157711543143,
+   "train_runtime": 422.7212,
+   "train_samples": 817,
+   "train_samples_per_second": 1.933,
+   "train_steps_per_second": 0.059
+ }
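
A quick consistency check on these figures: 817 samples over 422.72 s is about 1.93 samples/s, and the 25 optimizer steps recorded in trainer_state.json below work out to about 0.059 steps/s, matching the reported fields:

```python
# Sanity-check the reported throughput fields in all_results.json.
train_runtime = 422.7212   # seconds
train_samples = 817
max_steps = 25             # from trainer_state.json below

print(round(train_samples / train_runtime, 3))  # 1.933 samples/s
print(round(max_steps / train_runtime, 3))      # 0.059 steps/s
```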
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.05,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.48.3"
+ }
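
These decoding defaults (sampling enabled, temperature 0.7, top-p 0.8, top-k 20, repetition penalty 1.05) are picked up automatically by `generate` when the model is loaded from the repo. A short sketch of inspecting and applying them, using only standard `transformers` API and the values this file defines:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

repo = "qingyangzhang/Qwen2.5-3B-Open-R1-GRPO-Self-TQA"

# The values printed here come straight from generation_config.json.
gen_config = GenerationConfig.from_pretrained(repo)
print(gen_config.do_sample, gen_config.temperature, gen_config.top_p, gen_config.top_k)

tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

inputs = tokenizer("Hello, world", return_tensors="pt")
# Any sampling parameter not overridden here falls back to the repo defaults.
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```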
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "total_flos": 0.0,
+   "train_loss": 0.0001855157711543143,
+   "train_runtime": 422.7212,
+   "train_samples": 817,
+   "train_samples_per_second": 1.933,
+   "train_steps_per_second": 0.059
+ }
trainer_state.json ADDED
@@ -0,0 +1,342 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.975609756097561,
+   "eval_steps": 100,
+   "global_step": 25,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "completion_length": 22.076171875,
+       "epoch": 0.03902439024390244,
+       "grad_norm": 0.697234570980072,
+       "kl": 0.0,
+       "learning_rate": 6.666666666666666e-07,
+       "loss": 0.0,
+       "reward": 0.3046875,
+       "reward_std": 0.3262644410133362,
+       "rewards/semantic_entropy": 0.3046875,
+       "step": 1
+     },
+     {
+       "completion_length": 26.513671875,
+       "epoch": 0.07804878048780488,
+       "grad_norm": 0.44604623317718506,
+       "kl": 0.0,
+       "learning_rate": 1.3333333333333332e-06,
+       "loss": 0.0,
+       "reward": 0.40625,
+       "reward_std": 0.42861204594373703,
+       "rewards/semantic_entropy": 0.40625,
+       "step": 2
+     },
+     {
+       "completion_length": 22.556640625,
+       "epoch": 0.11707317073170732,
+       "grad_norm": 1.134758472442627,
+       "kl": 0.001371145248413086,
+       "learning_rate": 2e-06,
+       "loss": 0.0001,
+       "reward": 0.30078125,
+       "reward_std": 0.30227910727262497,
+       "rewards/semantic_entropy": 0.30078125,
+       "step": 3
+     },
+     {
+       "completion_length": 25.80078125,
+       "epoch": 0.15609756097560976,
+       "grad_norm": 0.8177841305732727,
+       "kl": 0.0005015134811401367,
+       "learning_rate": 1.9898214418809326e-06,
+       "loss": 0.0,
+       "reward": 0.302734375,
+       "reward_std": 0.3620207440108061,
+       "rewards/semantic_entropy": 0.302734375,
+       "step": 4
+     },
+     {
+       "completion_length": 23.333984375,
+       "epoch": 0.1951219512195122,
+       "grad_norm": 1.1548794507980347,
+       "kl": 0.0008560121059417725,
+       "learning_rate": 1.9594929736144973e-06,
+       "loss": 0.0,
+       "reward": 0.279296875,
+       "reward_std": 0.26689392141997814,
+       "rewards/semantic_entropy": 0.279296875,
+       "step": 5
+     },
+     {
+       "completion_length": 28.08984375,
+       "epoch": 0.23414634146341465,
+       "grad_norm": 2.489468812942505,
+       "kl": 0.008920669555664062,
+       "learning_rate": 1.9096319953545185e-06,
+       "loss": 0.0004,
+       "reward": 0.390625,
+       "reward_std": 0.3596664369106293,
+       "rewards/semantic_entropy": 0.390625,
+       "step": 6
+     },
+     {
+       "completion_length": 30.310546875,
+       "epoch": 0.2731707317073171,
+       "grad_norm": 0.4479486048221588,
+       "kl": 0.008846282958984375,
+       "learning_rate": 1.8412535328311812e-06,
+       "loss": 0.0004,
+       "reward": 0.4140625,
+       "reward_std": 0.41363558918237686,
+       "rewards/semantic_entropy": 0.4140625,
+       "step": 7
+     },
+     {
+       "completion_length": 30.3671875,
+       "epoch": 0.3121951219512195,
+       "grad_norm": 0.38852936029434204,
+       "kl": 0.0185089111328125,
+       "learning_rate": 1.7557495743542582e-06,
+       "loss": 0.0007,
+       "reward": 0.380859375,
+       "reward_std": 0.3279299959540367,
+       "rewards/semantic_entropy": 0.380859375,
+       "step": 8
+     },
+     {
+       "completion_length": 32.099609375,
+       "epoch": 0.35121951219512193,
+       "grad_norm": 0.4612264037132263,
+       "kl": 0.03018951416015625,
+       "learning_rate": 1.6548607339452852e-06,
+       "loss": 0.0012,
+       "reward": 0.3671875,
+       "reward_std": 0.40921078994870186,
+       "rewards/semantic_entropy": 0.3671875,
+       "step": 9
+     },
+     {
+       "completion_length": 34.060546875,
+       "epoch": 0.3902439024390244,
+       "grad_norm": 0.4247353971004486,
+       "kl": 0.029693603515625,
+       "learning_rate": 1.5406408174555977e-06,
+       "loss": 0.0012,
+       "reward": 0.408203125,
+       "reward_std": 0.4044051952660084,
+       "rewards/semantic_entropy": 0.408203125,
+       "step": 10
+     },
+     {
+       "completion_length": 32.00390625,
+       "epoch": 0.4292682926829268,
+       "grad_norm": 0.41794028878211975,
+       "kl": 0.0347137451171875,
+       "learning_rate": 1.4154150130018865e-06,
+       "loss": 0.0014,
+       "reward": 0.337890625,
+       "reward_std": 0.34299428947269917,
+       "rewards/semantic_entropy": 0.337890625,
+       "step": 11
+     },
+     {
+       "completion_length": 38.1015625,
+       "epoch": 0.4682926829268293,
+       "grad_norm": 0.32552075386047363,
+       "kl": 0.0347442626953125,
+       "learning_rate": 1.2817325568414297e-06,
+       "loss": 0.0014,
+       "reward": 0.400390625,
+       "reward_std": 0.4432142451405525,
+       "rewards/semantic_entropy": 0.400390625,
+       "step": 12
+     },
+     {
+       "completion_length": 38.935546875,
+       "epoch": 0.5073170731707317,
+       "grad_norm": 0.3407377600669861,
+       "kl": 0.045013427734375,
+       "learning_rate": 1.1423148382732853e-06,
+       "loss": 0.0018,
+       "reward": 0.41015625,
+       "reward_std": 0.4198991097509861,
+       "rewards/semantic_entropy": 0.41015625,
+       "step": 13
+     },
+     {
+       "completion_length": 33.248046875,
+       "epoch": 0.5463414634146342,
+       "grad_norm": 0.7667419910430908,
+       "kl": 0.03955078125,
+       "learning_rate": 1e-06,
+       "loss": 0.0016,
+       "reward": 0.3515625,
+       "reward_std": 0.39246053621172905,
+       "rewards/semantic_entropy": 0.3515625,
+       "step": 14
+     },
+     {
+       "completion_length": 34.533203125,
+       "epoch": 0.5853658536585366,
+       "grad_norm": 0.6600065231323242,
+       "kl": 0.045623779296875,
+       "learning_rate": 8.576851617267149e-07,
+       "loss": 0.0018,
+       "reward": 0.388671875,
+       "reward_std": 0.3779917135834694,
+       "rewards/semantic_entropy": 0.388671875,
+       "step": 15
+     },
+     {
+       "completion_length": 31.439453125,
+       "epoch": 0.624390243902439,
+       "grad_norm": 0.5913363695144653,
+       "kl": 0.03285980224609375,
+       "learning_rate": 7.182674431585702e-07,
+       "loss": 0.0013,
+       "reward": 0.41796875,
+       "reward_std": 0.3863699361681938,
+       "rewards/semantic_entropy": 0.41796875,
+       "step": 16
+     },
+     {
+       "completion_length": 31.44140625,
+       "epoch": 0.6634146341463415,
+       "grad_norm": 0.342813640832901,
+       "kl": 0.030029296875,
+       "learning_rate": 5.845849869981136e-07,
+       "loss": 0.0012,
+       "reward": 0.365234375,
+       "reward_std": 0.3471006993204355,
+       "rewards/semantic_entropy": 0.365234375,
+       "step": 17
+     },
+     {
+       "completion_length": 30.06640625,
+       "epoch": 0.7024390243902439,
+       "grad_norm": 0.5251995325088501,
+       "kl": 0.0410003662109375,
+       "learning_rate": 4.5935918254440274e-07,
+       "loss": 0.0016,
+       "reward": 0.33203125,
+       "reward_std": 0.3720228523015976,
+       "rewards/semantic_entropy": 0.33203125,
+       "step": 18
+     },
+     {
+       "completion_length": 36.03125,
+       "epoch": 0.7414634146341463,
+       "grad_norm": 0.44380083680152893,
+       "kl": 0.02307891845703125,
+       "learning_rate": 3.45139266054715e-07,
+       "loss": 0.0009,
+       "reward": 0.369140625,
+       "reward_std": 0.40155548974871635,
+       "rewards/semantic_entropy": 0.369140625,
+       "step": 19
+     },
+     {
+       "completion_length": 29.765625,
+       "epoch": 0.7804878048780488,
+       "grad_norm": 0.5805388689041138,
+       "kl": 0.03347015380859375,
+       "learning_rate": 2.4425042564574185e-07,
+       "loss": 0.0013,
+       "reward": 0.376953125,
+       "reward_std": 0.37975445948541164,
+       "rewards/semantic_entropy": 0.376953125,
+       "step": 20
+     },
+     {
+       "completion_length": 33.17578125,
+       "epoch": 0.8195121951219512,
+       "grad_norm": 0.4464017450809479,
+       "kl": 0.011322021484375,
+       "learning_rate": 1.5874646716881868e-07,
+       "loss": 0.0005,
+       "reward": 0.3671875,
+       "reward_std": 0.40596737898886204,
+       "rewards/semantic_entropy": 0.3671875,
+       "step": 21
+     },
+     {
+       "completion_length": 26.4453125,
+       "epoch": 0.8585365853658536,
+       "grad_norm": 0.5445716977119446,
+       "kl": 0.028438568115234375,
+       "learning_rate": 9.036800464548156e-08,
+       "loss": 0.0011,
+       "reward": 0.4375,
+       "reward_std": 0.39070899225771427,
+       "rewards/semantic_entropy": 0.4375,
+       "step": 22
+     },
+     {
+       "completion_length": 28.7578125,
+       "epoch": 0.8975609756097561,
+       "grad_norm": 0.49012911319732666,
+       "kl": 0.018596649169921875,
+       "learning_rate": 4.050702638550274e-08,
+       "loss": 0.0007,
+       "reward": 0.39453125,
+       "reward_std": 0.3807212747633457,
+       "rewards/semantic_entropy": 0.39453125,
+       "step": 23
+     },
+     {
+       "completion_length": 26.87109375,
+       "epoch": 0.9365853658536586,
+       "grad_norm": 0.47687703371047974,
+       "kl": 0.03133201599121094,
+       "learning_rate": 1.0178558119067315e-08,
+       "loss": 0.0013,
+       "reward": 0.30859375,
+       "reward_std": 0.3204674646258354,
+       "rewards/semantic_entropy": 0.30859375,
+       "step": 24
+     },
+     {
+       "completion_length": 28.25390625,
+       "epoch": 0.975609756097561,
+       "grad_norm": 0.623159646987915,
+       "kl": 0.02616119384765625,
+       "learning_rate": 0.0,
+       "loss": 0.001,
+       "reward": 0.375,
+       "reward_std": 0.3317893110215664,
+       "rewards/semantic_entropy": 0.375,
+       "step": 25
+     },
+     {
+       "epoch": 0.975609756097561,
+       "step": 25,
+       "total_flos": 0.0,
+       "train_loss": 0.0001855157711543143,
+       "train_runtime": 422.7212,
+       "train_samples_per_second": 1.933,
+       "train_steps_per_second": 0.059
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 25,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 10,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
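
The per-step entries in `log_history` carry the reward, KL, and completion-length curves for the run. A small sketch of summarizing them, assuming the file above is saved locally as `trainer_state.json`:

```python
import json

# Summarize the per-step metrics logged in trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# The final summary entry has no "reward" field, so filter on it.
steps = [e for e in state["log_history"] if "reward" in e]
mean_reward = sum(e["reward"] for e in steps) / len(steps)
mean_length = sum(e["completion_length"] for e in steps) / len(steps)
max_kl = max(e["kl"] for e in steps)

print(f"{len(steps)} logged steps | mean reward {mean_reward:.3f} | "
      f"mean completion length {mean_length:.1f} tokens | max KL {max_kl:.4f}")
```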