Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- configs/config_1.07G_dp1_tp8_pp1_acc1_mbs1_seq131072_zero0_tpmodeRED_l15_h2048_heads16.yaml +91 -0
- configs/config_1.07G_dp1_tp8_pp1_acc1_mbs8_seq4096_zero0_tpmodeRED_l15_h2048_heads16.yaml +91 -0
- configs/config_1.14G_dp16_tp16_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab32k.yaml +91 -0
- configs/config_1.14G_dp16_tp32_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml +91 -0
- configs/config_1.14G_dp16_tp8_pp1_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml +91 -0
- configs/config_1.14G_dp1_tp1_pp8_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k.yaml +91 -0
- configs/config_1.14G_dp2_tp256_pp1_acc128_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml +91 -0
- configs/config_1.14G_dp2_tp256_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab32k.yaml +91 -0
- configs/config_1.14G_dp2_tp4_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml +91 -0
- configs/config_1.14G_dp32_tp16_pp1_acc4_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml +91 -0
- configs/config_1.14G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml +91 -0
- configs/config_1.14G_dp32_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k.yaml +91 -0
- configs/config_1.14G_dp32_tp4_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml +91 -0
- configs/config_1.14G_dp3_tp8_pp1_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k_prof.yaml +92 -0
- configs/config_1.14G_dp4_tp128_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml +91 -0
- configs/config_1.14G_dp4_tp128_pp1_acc8_mbs16_seq8192_zero1_tpmodeALL_vocab32k.yaml +91 -0
- configs/config_1.14G_dp4_tp16_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml +91 -0
- configs/config_1.14G_dp8_tp2_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml +91 -0
- configs/config_1.14G_dp8_tp2_pp1_acc32_mbs2_seq2048_zero1_tpmodeALL_vocab32k.yaml +91 -0
- configs/config_1.14G_dp8_tp4_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml +91 -0
- configs/config_1.14G_dp8_tp64_pp1_acc1_mbs64_seq2048_zero1_tpmodeALL_vocab32k.yaml +91 -0
- configs/config_1.34G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp16_tp16_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k.yaml +91 -0
- configs/config_1.34G_dp16_tp1_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp16_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp1_tp16_pp8_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp1_tp32_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp256_tp1_pp1_acc2_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp2_tp16_pp1_acc128_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp2_tp16_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp2_tp256_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp2_tp32_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml +91 -0
- configs/config_1.34G_dp2_tp8_pp4_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp32_tp4_pp1_acc32_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml +91 -0
- configs/config_1.34G_dp32_tp4_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml +91 -0
- configs/config_1.34G_dp4_tp16_pp1_acc4_mbs128_seq2048_zero1_tpmodeALL_vocab131k.yaml +91 -0
- configs/config_1.34G_dp4_tp32_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml +91 -0
- configs/config_1.34G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp4_tp64_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml +91 -0
- configs/config_1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp64_tp1_pp2_acc8_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp64_tp2_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml +91 -0
- configs/config_1.34G_dp8_tp1_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp8_tp1_pp2_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp8_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml +91 -0
- configs/config_1.34G_dp8_tp8_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml +91 -0
- configs/config_187G_dp2_tp8_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128.yaml +91 -0
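Every filename above encodes one benchmark point: parameter count, then dp/tp/pp (data, tensor, and pipeline parallel degrees), gradient-accumulation steps, micro-batch size, sequence length, ZeRO stage, and TP communication mode (RED = REDUCE_SCATTER, ALL = ALL_REDUCE), plus a vocab or model-shape suffix. A minimal sketch of decoding that convention — the parser itself is illustrative, not part of this repo:

import re

# Hypothetical helper: decode config_<size>G_dp<D>_tp<T>_pp<P>_acc<A>_mbs<B>_seq<S>_zero<Z>_tpmode<M>_... names.
PATTERN = re.compile(
    r"config_(?P<params>[\d.]+)G"
    r"_dp(?P<dp>\d+)_tp(?P<tp>\d+)_pp(?P<pp>\d+)"
    r"_acc(?P<acc>\d+)_mbs(?P<mbs>\d+)_seq(?P<seq>\d+)"
    r"_zero(?P<zero>\d)_tpmode(?P<tpmode>RED|ALL)"
)

def parse_config_name(name: str) -> dict:
    m = PATTERN.search(name)
    if m is None:
        raise ValueError(f"unrecognized config name: {name}")
    d = {k: (v if k in ("params", "tpmode") else int(v))
         for k, v in m.groupdict().items()}
    d["world_size"] = d["dp"] * d["tp"] * d["pp"]  # GPUs needed for this run
    return d

print(parse_config_name(
    "config_1.14G_dp16_tp16_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab32k.yaml"))
# -> {'params': '1.14', 'dp': 16, 'tp': 16, 'pp': 1, 'acc': 2, 'mbs': 64,
#     'seq': 2048, 'zero': 1, 'tpmode': 'RED', 'world_size': 256}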
configs/config_1.07G_dp1_tp8_pp1_acc1_mbs1_seq131072_zero0_tpmodeRED_l15_h2048_heads16.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench_seqlen.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.07G_dp1_tp8_pp1_acc1_mbs1_seq131072_zero0_tpmodeRED_l15_h2048_heads16
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 131072
+    num_attention_heads: 16
+    num_hidden_layers: 15
+    num_key_value_heads: 16
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 1
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 131072
+  train_steps: 100
+  val_check_interval: 100
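The "1.07G" in this filename is consistent with a direct count over the model_config above. A minimal sketch of that arithmetic (standard Llama-style counting; assumes tied embeddings are counted once and ignores the tiny norm parameters):

def llama_param_count(vocab, hidden, layers, heads, kv_heads, intermediate, tied=True):
    """Rough Llama-style parameter count (no biases; norms are negligible)."""
    head_dim = hidden // heads
    attn = hidden * hidden                 # q_proj
    attn += hidden * kv_heads * head_dim   # k_proj
    attn += hidden * kv_heads * head_dim   # v_proj
    attn += hidden * hidden                # o_proj
    mlp = 3 * hidden * intermediate        # gate, up, down projections
    embed = vocab * hidden
    return layers * (attn + mlp) + embed * (1 if tied else 2)

# Values copied from the model_config above:
n = llama_param_count(vocab=32768, hidden=2048, layers=15,
                      heads=16, kv_heads=16, intermediate=8192)
print(f"{n / 1e9:.2f}G")  # -> 1.07G, matching the filename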
configs/config_1.07G_dp1_tp8_pp1_acc1_mbs8_seq4096_zero0_tpmodeRED_l15_h2048_heads16.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench_tp.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.07G_dp1_tp8_pp1_acc1_mbs8_seq4096_zero0_tpmodeRED_l15_h2048_heads16
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 16
+    num_hidden_layers: 15
+    num_key_value_heads: 16
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 1
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 8
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp16_tp16_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp16_tp16_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 64
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
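The effective batch per optimizer step follows directly from the parallelism and tokens sections above: dp × batch_accumulation_per_replica × micro_batch_size sequences, times sequence_length tokens. A worked check for this config:

# Effective batch for the config above (values copied from parallelism/tokens):
dp, acc, mbs, seq = 16, 2, 64, 2048
sequences_per_step = dp * acc * mbs            # 2048 sequences
tokens_per_step = sequences_per_step * seq     # 4,194,304 (~4.2M) tokens
print(f"{sequences_per_step} sequences, {tokens_per_step:,} tokens per step")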
configs/config_1.14G_dp16_tp32_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp16_tp32_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 32
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp16_tp8_pp1_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp16_tp8_pp1_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp1_tp1_pp8_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp1_tp1_pp8_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 1
+  expert_parallel_size: 1
+  pp: 8
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 64
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp2_tp256_pp1_acc128_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp256_pp1_acc128_mbs8_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 256
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 128
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 8
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp2_tp256_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp256_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 256
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 8
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp2_tp4_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp4_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 128
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
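This is the first tpmodeALL config in the set: tp_mode flips to ALL_REDUCE and tp_linear_async_communication turns off, whereas the tpmodeRED configs pair REDUCE_SCATTER with async communication. A rough torch.distributed sketch of the difference at the output of a row-parallel linear layer (illustrative only; nanotron's actual kernels fuse and overlap these collectives):

import torch
import torch.distributed as dist

def row_parallel_output(partial: torch.Tensor, tp_mode: str) -> torch.Tensor:
    """Combine per-rank partial sums from a row-parallel linear.

    partial: [seq, batch, hidden] local partial result on each TP rank.
    ALL_REDUCE leaves the full activation on every rank; REDUCE_SCATTER
    leaves each rank with a 1/tp shard along the sequence dimension, which
    the next column-parallel layer then all-gathers (sequence parallelism).
    """
    if tp_mode == "ALL_REDUCE":
        dist.all_reduce(partial, op=dist.ReduceOp.SUM)
        return partial                       # full tensor on every rank
    elif tp_mode == "REDUCE_SCATTER":
        tp = dist.get_world_size()
        out = torch.empty(partial.shape[0] // tp, *partial.shape[1:],
                          device=partial.device, dtype=partial.dtype)
        dist.reduce_scatter_tensor(out, partial)
        return out                           # sequence shard per rank
    raise ValueError(f"unknown tp_mode: {tp_mode}")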
configs/config_1.14G_dp32_tp16_pp1_acc4_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp32_tp16_pp1_acc4_mbs4_seq2048_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp32_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp32_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 16
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp32_tp4_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp32_tp4_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp3_tp8_pp1_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k_prof.yaml
ADDED
@@ -0,0 +1,92 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp3_tp8_pp1_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k_prof
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 3
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler:
+  profiler_export_path: ./tb_logs
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 64
+  sequence_length: 2048
+  train_steps: 10
+  val_check_interval: 100
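This profiling variant is the only config in the set with a non-null profiler section: it exports traces to ./tb_logs and runs just 10 steps on dp: 3 × tp: 8 = 24 GPUs. A sketch of the kind of torch.profiler loop such a setting presumably drives (the wiring to nanotron's profiler field is an assumption; train_step is a placeholder):

from torch.profiler import (ProfilerActivity, profile, schedule,
                            tensorboard_trace_handler)

def train_step():
    """Placeholder for one forward/backward/optimizer step."""

# Profile a handful of steps and export TensorBoard-readable traces,
# analogous to `profiler.profiler_export_path: ./tb_logs` above.
prof = profile(
    activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
    schedule=schedule(wait=1, warmup=1, active=3, repeat=1),
    on_trace_ready=tensorboard_trace_handler("./tb_logs"),
)
with prof:
    for step in range(10):   # matches train_steps: 10 above
        train_step()
        prof.step()          # advance the profiling schedule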
configs/config_1.14G_dp4_tp128_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp128_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 128
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 16
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp4_tp128_pp1_acc8_mbs16_seq8192_zero1_tpmodeALL_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp128_pp1_acc8_mbs16_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 128
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp4_tp16_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp16_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 32
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp8_tp2_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.14G_dp8_tp2_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 8192
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 32768
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 8
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 2
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 16
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 4
  sequence_length: 8192
  train_steps: 100
  val_check_interval: 100
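This is the first REDUCE_SCATTER config in this stretch of the sweep, and it illustrates a pairing that holds across these files: tp_mode: REDUCE_SCATTER always appears together with tp_linear_async_communication: true, while ALL_REDUCE always pairs with false. A small sanity check in that spirit (PyYAML assumed; the script itself is not part of the repo) could flag a mismatched pair before a sweep is launched:

import glob
import yaml  # pip install pyyaml

for path in sorted(glob.glob("configs/*.yaml")):
    with open(path) as f:
        parallelism = yaml.safe_load(f)["parallelism"]
    # Async linear communication is only expected with reduce-scatter TP.
    expected = parallelism["tp_mode"] == "REDUCE_SCATTER"
    assert parallelism["tp_linear_async_communication"] == expected, path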
configs/config_1.14G_dp8_tp2_pp1_acc32_mbs2_seq2048_zero1_tpmodeALL_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.14G_dp8_tp2_pp1_acc32_mbs2_seq2048_zero1_tpmodeALL_vocab32k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 2048
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 32768
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 8
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 2
  tp_linear_async_communication: false
  tp_mode: ALL_REDUCE
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 32
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 2
  sequence_length: 2048
  train_steps: 100
  val_check_interval: 100
configs/config_1.14G_dp8_tp4_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.14G_dp8_tp4_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 32768
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 32768
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 8
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 4
  tp_linear_async_communication: false
  tp_mode: ALL_REDUCE
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 2
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 2
  sequence_length: 32768
  train_steps: 100
  val_check_interval: 100
configs/config_1.14G_dp8_tp64_pp1_acc1_mbs64_seq2048_zero1_tpmodeALL_vocab32k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.14G_dp8_tp64_pp1_acc1_mbs64_seq2048_zero1_tpmodeALL_vocab32k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 2048
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 32768
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 8
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 64
  tp_linear_async_communication: false
  tp_mode: ALL_REDUCE
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 64
  sequence_length: 2048
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 2048
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 128
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 4
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 4
  sequence_length: 2048
  train_steps: 100
  val_check_interval: 100
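From here the listing moves to the 1.34G variants. They share every architectural field with the 1.14G configs except vocab_size (131072 vs 32768); with tied input/output embeddings and hidden_size 2048, the larger vocabulary matrix accounts for the size difference almost exactly, as this back-of-the-envelope check shows:

hidden_size = 2048
emb_131k = 131072 * hidden_size  # ~268M parameters in the embedding matrix
emb_32k = 32768 * hidden_size    # ~67M parameters
print(f"{(emb_131k - emb_32k) / 1e9:.2f}G extra")  # ~0.20G, i.e. 1.14G -> 1.34G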
configs/config_1.34G_dp16_tp16_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp16_tp16_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 8192
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 16
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 16
  tp_linear_async_communication: false
  tp_mode: ALL_REDUCE
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 8
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 4
  sequence_length: 8192
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp16_tp1_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp16_tp1_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 8192
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 16
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 1
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 32
  sequence_length: 8192
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp16_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp16_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 8192
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 16
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 2
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 4
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 8
  sequence_length: 8192
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp1_tp16_pp8_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final2.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp1_tp16_pp8_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 0
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 8
  pp_engine: 1f1b
  tp: 16
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 32
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 8
  sequence_length: 4096
  train_steps: 100
  val_check_interval: 100
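Note that the dp: 1 configs in this section (this one and the dp1_tp32_pp2 file that follows) are the ones that drop to zero_stage: 0, while the dp >= 2 configs use stage 1. That is consistent with how ZeRO-1 works: it shards optimizer state across data-parallel ranks, so with a single replica there is nothing to shard. A rough sketch of the per-rank Adam-moment footprint under ZeRO-1 (fp32 moments only; fp32 master weights and the gradient-accumulation buffer implied by accumulate_grad_in_fp32 add more on top):

params = 1.34e9
moment_bytes = 4 + 4  # fp32 exp_avg + exp_avg_sq per parameter
for dp in (1, 2, 32, 256):
    per_rank_gib = params * moment_bytes / dp / 2**30
    print(f"dp={dp}: {per_rank_gib:.1f} GiB of Adam state per rank")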
configs/config_1.34G_dp1_tp32_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final2.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp1_tp32_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 0
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 2
  pp_engine: 1f1b
  tp: 32
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 2
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 128
  sequence_length: 4096
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp256_tp1_pp1_acc2_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp256_tp1_pp1_acc2_mbs4_seq2048_zero1_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 2048
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 256
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 1
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 2
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 4
  sequence_length: 2048
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp2_tp16_pp1_acc128_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp2_tp16_pp1_acc128_mbs2_seq8192_zero1_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 8192
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 2
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 16
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 128
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 2
  sequence_length: 8192
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp2_tp16_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp2_tp16_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 32768
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 2
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 16
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 4
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 16
  sequence_length: 32768
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp2_tp256_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp2_tp256_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 8192
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 2
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 256
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 32
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 2
  sequence_length: 8192
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp2_tp32_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final2.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp2_tp32_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 2
  expert_parallel_size: 1
  pp: 2
  pp_engine: 1f1b
  tp: 32
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 2
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 64
  sequence_length: 4096
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 32768
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 2
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 8
  tp_linear_async_communication: false
  tp_mode: ALL_REDUCE
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 32
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 2
  sequence_length: 32768
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp2_tp8_pp4_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset: null
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: benchmark/results/bench_final2.csv
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: 1.34G_dp2_tp8_pp4_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 0
    eos_token_id: 0
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 16
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 131072
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 2
  expert_parallel_size: 1
  pp: 4
  pp_engine: 1f1b
  tp: 8
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 4
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 32
  sequence_length: 4096
  train_steps: 100
  val_check_interval: 100
configs/config_1.34G_dp32_tp4_pp1_acc32_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp32_tp4_pp1_acc32_mbs2_seq2048_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 32
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp32_tp4_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp32_tp4_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 32
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
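One convention is visible across the files added here: tp_linear_async_communication is true exactly when tp_mode is REDUCE_SCATTER, and false for ALL_REDUCE. A short hypothetical scan (assuming the files sit under configs/; not part of the original tooling) that verifies the pairing:

# check_tp_mode_pairing.py -- a small sketch, paths assumed.
# In these benchmark configs, REDUCE_SCATTER runs enable async linear
# communication and ALL_REDUCE runs disable it; this asserts that holds.
import glob
import yaml

for path in sorted(glob.glob("configs/config_*.yaml")):
    with open(path) as f:
        par = yaml.safe_load(f)["parallelism"]
    expected_async = par["tp_mode"] == "REDUCE_SCATTER"
    assert par["tp_linear_async_communication"] == expected_async, path
print("tp_mode / async-communication pairing holds for all configs")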
configs/config_1.34G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp4_tp16_pp1_acc4_mbs128_seq2048_zero1_tpmodeALL_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp4_tp16_pp1_acc4_mbs128_seq2048_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 128
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp4_tp32_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp4_tp32_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 32
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 32
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 32
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp4_tp64_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp4_tp64_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 64
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 16
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 64
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp64_tp1_pp2_acc8_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp64_tp1_pp2_acc8_mbs1_seq8192_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 64
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp64_tp2_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp64_tp2_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 64
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp8_tp1_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp8_tp1_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp8_tp1_pp2_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp8_tp1_pp2_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 128
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 32
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp8_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp8_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp8_tp8_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp8_tp8_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 8
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_187G_dp2_tp8_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128.yaml
ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench_seqlen.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 187G_dp2_tp8_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 16384
+    initializer_range: 0.02
+    intermediate_size: 53248
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 128
+    num_hidden_layers: 126
+    num_key_value_heads: 128
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: false
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 32
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
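The run names encode the whole sweep axis by axis: model size, dp/tp/pp degrees, gradient-accumulation steps, micro-batch size, sequence length, ZeRO stage, TP mode, and a vocab or model-shape suffix. A hypothetical parser, shown only to document the scheme (the regex is not part of the original tooling); for the 187G config above it recovers a world size of 2 * 8 * 32 = 512 GPUs:

# parse_run_name.py -- illustrative sketch of the naming convention.
import re

PATTERN = re.compile(
    r"(?P<size>[\d.]+G)_dp(?P<dp>\d+)_tp(?P<tp>\d+)_pp(?P<pp>\d+)"
    r"_acc(?P<acc>\d+)_mbs(?P<mbs>\d+)_seq(?P<seq>\d+)_zero(?P<zero>\d+)"
    r"_tpmode(?P<tpmode>\w+?)_(?P<suffix>.+)"
)

run = "187G_dp2_tp8_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128"
m = PATTERN.match(run)
assert m is not None
dp, tp, pp = int(m["dp"]), int(m["tp"]), int(m["pp"])
print(f"model size {m['size']}, ZeRO stage {m['zero']}, tp_mode {m['tpmode']}")
print(f"world size = dp*tp*pp = {dp * tp * pp}")  # 2 * 8 * 32 = 512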