diff --git a/configs/config_1.14G_dp128_tp2_pp1_acc1_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp128_tp2_pp1_acc1_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2364ff7ce5854ec7c6d2bf9745190dee9c64fe65 --- /dev/null +++ b/configs/config_1.14G_dp128_tp2_pp1_acc1_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp128_tp2_pp1_acc1_mbs1_seq32768_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 128 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp128_tp2_pp1_acc1_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp128_tp2_pp1_acc1_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f49b786c92f1953cefe770961f8bf453ae1dbf64 --- /dev/null +++ b/configs/config_1.14G_dp128_tp2_pp1_acc1_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp128_tp2_pp1_acc1_mbs4_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + 
log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 128 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp128_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp128_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9c522f22c59a823774543288f816b85ec8aeff1 --- /dev/null +++ b/configs/config_1.14G_dp2_tp128_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp128_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b 
+ tp: 128 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp128_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp128_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f91f1fcdc045e4775a7ba5709f92a3bb318aa8c --- /dev/null +++ b/configs/config_1.14G_dp2_tp128_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp128_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 128 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp16_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp16_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8940da6bc39e6d0c8fff995fa18344837c60d319 --- /dev/null +++ b/configs/config_1.14G_dp2_tp16_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + 
num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp16_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp1_pp4_acc2_mbs8_seq32768_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp1_pp4_acc2_mbs8_seq32768_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..172038727756843fd64fc54bd2a950162d8500b4 --- /dev/null +++ b/configs/config_1.14G_dp2_tp1_pp4_acc2_mbs8_seq32768_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp1_pp4_acc2_mbs8_seq32768_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 
0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp32_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp32_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9224335051bf48ed13f3393f643b9c49923e45c9 --- /dev/null +++ b/configs/config_1.14G_dp2_tp32_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp32_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp32_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp32_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..d4891075983552dbbfce8a70d9400b9ca774647e --- /dev/null +++ b/configs/config_1.14G_dp2_tp32_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp32_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp4_pp1_acc8_mbs8_seq32768_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp4_pp1_acc8_mbs8_seq32768_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64770a448e85a6f2721a934e8c3530815674ff32 --- /dev/null +++ b/configs/config_1.14G_dp2_tp4_pp1_acc8_mbs8_seq32768_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp4_pp1_acc8_mbs8_seq32768_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + 
initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp16_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp16_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9fb576f966d2f46c71640527998cdb91fbc0781f --- /dev/null +++ b/configs/config_1.14G_dp4_tp16_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp16_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + 
batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp2_pp1_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp2_pp1_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..495ec316b06bfb3ab11964936b952452c759090b --- /dev/null +++ b/configs/config_1.14G_dp4_tp2_pp1_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp2_pp1_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp2_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp2_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f63e35e9c21c616ae8455542c3f53483c748310 --- /dev/null +++ b/configs/config_1.14G_dp4_tp2_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 
1.14G_dp4_tp2_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..80a7e8b9f5d246010f34207c0665e09174d3c778 --- /dev/null +++ b/configs/config_1.14G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 
1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp4_pp1_acc128_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp4_pp1_acc128_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f04c0781fabca0f16f0f8c4e46ba2c5184ef1d93 --- /dev/null +++ b/configs/config_1.14G_dp4_tp4_pp1_acc128_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp4_pp1_acc128_mbs4_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp4_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp4_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..093e93f109fad1685d8c3422e731f7cb7853bb19 --- /dev/null +++ b/configs/config_1.14G_dp4_tp4_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + 
checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp4_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp64_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp64_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9012483fcb4ba8442852144d8ff1ef2408f4d48e --- /dev/null +++ b/configs/config_1.14G_dp4_tp64_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp64_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + 
tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 64 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp8_pp16_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.yaml b/configs/config_1.14G_dp4_tp8_pp16_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.yaml new file mode 100644 index 0000000000000000000000000000000000000000..38da2810eee7f1a2103251b40770995816d9a4e1 --- /dev/null +++ b/configs/config_1.14G_dp4_tp8_pp16_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp8_pp16_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 16 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git 
a/configs/config_1.14G_dp64_tp1_pp2_acc1_mbs8_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp64_tp1_pp2_acc1_mbs8_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2e3b93e55738ea1ef89b37f294272f8e68f28bbd --- /dev/null +++ b/configs/config_1.14G_dp64_tp1_pp2_acc1_mbs8_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp64_tp1_pp2_acc1_mbs8_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp64_tp4_pp1_acc1_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp64_tp4_pp1_acc1_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..02bf2985a52e0a3bf24a04fd6c2d0505f34220d5 --- /dev/null +++ b/configs/config_1.14G_dp64_tp4_pp1_acc1_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp64_tp4_pp1_acc1_mbs2_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + 
ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp64_tp4_pp1_acc2_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp64_tp4_pp1_acc2_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..43390be99657db3ce800497fbec36a884958cc22 --- /dev/null +++ b/configs/config_1.14G_dp64_tp4_pp1_acc2_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp64_tp4_pp1_acc2_mbs1_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: 
true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e010294c05fb5f59e988b3e4ded06346a6eff3ce --- /dev/null +++ b/configs/config_1.14G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp4_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp4_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9198ce8435c5fdc82b858284274433a1fe486651 --- /dev/null +++ b/configs/config_1.14G_dp8_tp4_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + 
start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp4_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp128_tp1_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp128_tp1_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7ad46af70380a5269e448de2e3a7da72dde5688a --- /dev/null +++ b/configs/config_1.34G_dp128_tp1_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp128_tp1_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + 
lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 128 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp16_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp16_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f4eabd2420b9159fb2dee87e046523e5fa505554 --- /dev/null +++ b/configs/config_1.34G_dp16_tp16_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp16_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp1_pp2_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp1_pp2_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..db36818091519fe94c8baca0dedf6a49163daf80 --- 
/dev/null +++ b/configs/config_1.34G_dp16_tp1_pp2_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp1_pp2_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp2_pp1_acc1_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp2_pp1_acc1_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64c9c800d1090c8d9074cecde8dc92f257f41ba6 --- /dev/null +++ b/configs/config_1.34G_dp16_tp2_pp1_acc1_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp2_pp1_acc1_mbs32_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + 
max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp2_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp16_tp2_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ab52a3ef0dca12eaa02bd5c57fac299a64036d02 --- /dev/null +++ b/configs/config_1.34G_dp16_tp2_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp2_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 
0 + micro_batch_size: 4 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp1_tp4_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp1_tp4_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2282832572b1f97c3afdc185b640a0dda25987c4 --- /dev/null +++ b/configs/config_1.34G_dp1_tp4_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp1_tp4_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp16_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp16_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3557bef6b16ef062fa64bd55b8dccf501e76e19a --- /dev/null +++ b/configs/config_1.34G_dp2_tp16_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp16_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + 
step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp256_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp256_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3ada2e0704ce69fc427c66f689325fad3f5b6f09 --- /dev/null +++ b/configs/config_1.34G_dp2_tp256_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp256_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + 
weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 256 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp256_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp256_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..734a6ffdcd3b8ed49bc0a64168758c0eb200fd88 --- /dev/null +++ b/configs/config_1.34G_dp2_tp256_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp256_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 256 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp4_pp1_acc4_mbs64_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp4_pp1_acc4_mbs64_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..771d0375bb3ba5c27e21ca42257fd3a77c861683 --- /dev/null +++ b/configs/config_1.34G_dp2_tp4_pp1_acc4_mbs64_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: 
false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp4_pp1_acc4_mbs64_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp16_pp1_acc16_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp32_tp16_pp1_acc16_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a4f3f1c57f5da21bc3e35a20bc7f01a457a49419 --- /dev/null +++ b/configs/config_1.34G_dp32_tp16_pp1_acc16_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp16_pp1_acc16_mbs4_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: 
true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp16_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp32_tp16_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75622e37d17ced87df8ce08e1f2bd965cf82ce08 --- /dev/null +++ b/configs/config_1.34G_dp32_tp16_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp16_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp2_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml 
b/configs/config_1.34G_dp32_tp2_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ab2e22ddaa91808714220de60b89db4502d83eb2 --- /dev/null +++ b/configs/config_1.34G_dp32_tp2_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp2_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp4_pp2_acc2_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp32_tp4_pp2_acc2_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..407040a64a4697d3be59a8c5f03f0caf33fa3f14 --- /dev/null +++ b/configs/config_1.34G_dp32_tp4_pp2_acc2_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp4_pp2_acc2_mbs64_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + 
make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp128_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp128_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e98631442802819323f9068073affd48ed4b83f2 --- /dev/null +++ b/configs/config_1.34G_dp4_tp128_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp128_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 128 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null 
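+# Assuming dp/tp/pp denote the data-, tensor- and pipeline-parallel degrees the
+# run name encodes, this sweep point needs dp * tp * pp = 4 * 128 * 1 = 512
+# devices. As throughout these configs, tp_mode: ALL_REDUCE is paired with
+# tp_linear_async_communication: false; async communication appears only with
+# REDUCE_SCATTER.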
+tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp128_pp1_acc64_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp128_pp1_acc64_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b2a8875d8fb4ca4f3e368f47fc12b7efba249a03 --- /dev/null +++ b/configs/config_1.34G_dp4_tp128_pp1_acc64_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp128_pp1_acc64_mbs2_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 128 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp16_pp1_acc128_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp16_pp1_acc128_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e18747987d14fc6ccadb99df8122c072c7358d23 --- /dev/null +++ b/configs/config_1.34G_dp4_tp16_pp1_acc128_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 
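+# Under the same reading of the run name, the effective batch here is
+# dp * batch_accumulation_per_replica * micro_batch_size = 4 * 128 * 1 = 512
+# sequences of 8192 tokens per optimizer step (~4.19M tokens), on
+# dp * tp * pp = 4 * 16 * 1 = 64 devices.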
+general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp16_pp1_acc128_mbs1_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp16_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp16_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..93289399bc486d41c0274b369a481f1eb967f794 --- /dev/null +++ b/configs/config_1.34G_dp4_tp16_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp16_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + 
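+# Schedule: a 2-step linear warmup to learning_rate 3.0e-4, then (with
+# lr_decay_starting_step left null, presumably meaning decay begins right after
+# warmup) a 13-step cosine decay to min_decay_lr 1.0e-5, flat for the remainder
+# of the 100 train_steps.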
lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ada4d424843a334680456029abb95ff3fbb5eb4 --- /dev/null +++ b/configs/config_1.34G_dp4_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp32_pp1_acc4_mbs32_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp32_pp1_acc4_mbs32_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..577c8fd27b990ddbb99710c6cb1464e067066ed0 --- /dev/null +++ 
b/configs/config_1.34G_dp4_tp32_pp1_acc4_mbs32_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp32_pp1_acc4_mbs32_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp4_pp4_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp4_pp4_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..806124e608132699cb9871fc117373184ad3243e --- /dev/null +++ b/configs/config_1.34G_dp4_tp4_pp4_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp4_pp4_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + 
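+# max_position_embeddings tracks tokens.sequence_length (4096 here) throughout
+# the sweep. Note that num_key_value_heads equals num_attention_heads (32) in
+# this config, i.e. plain multi-head attention, whereas most other sweep points
+# use 8 KV heads (grouped-query attention).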
max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp64_tp2_pp1_acc4_mbs8_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp64_tp2_pp1_acc4_mbs8_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..04f3a28137dea4048a3a5a52fdd24f12eab8ce5e --- /dev/null +++ b/configs/config_1.34G_dp64_tp2_pp1_acc4_mbs8_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp64_tp2_pp1_acc4_mbs8_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + 
limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp16_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp16_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..568e519ccc460dba17e8a3fd6e2fea725868d9ae --- /dev/null +++ b/configs/config_1.34G_dp8_tp16_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp16_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp1_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp1_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..78d6d42a082f45c12e677d7e049870182d6158c4 --- /dev/null +++ b/configs/config_1.34G_dp8_tp1_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp1_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k 
+ seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp1_pp2_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp1_pp2_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a062dbd60f0498b60fbf645792b9f3f3a3b900c9 --- /dev/null +++ b/configs/config_1.34G_dp8_tp1_pp2_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2_pp.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp1_pp2_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + 
weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp32_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp32_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..919a958134ad13edfc6d412d30edb94e076288bd --- /dev/null +++ b/configs/config_1.34G_dp8_tp32_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp32_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp16_tp32_pp1_acc4_mbs4_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp16_tp32_pp1_acc4_mbs4_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..70b292b886307759f2861bb159c9e9bafc0c8afb --- /dev/null +++ b/configs/config_3.57G_dp16_tp32_pp1_acc4_mbs4_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + 
resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp16_tp32_pp1_acc4_mbs4_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e25d744e870474561757e9a70b5a687b4f602df --- /dev/null +++ b/configs/config_3.57G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + 
vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b980ab1d023185e1def054dd5c2e8cd07e60cdea --- /dev/null +++ b/configs/config_3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp4_tp16_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml 
b/configs/config_3.57G_dp4_tp16_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7599af61f45f2024fbec3acccc947fec2228fd73 --- /dev/null +++ b/configs/config_3.57G_dp4_tp16_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp4_tp16_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp4_tp2_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp4_tp2_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..246374b0f6c0bf7a3c98b3c7efbdf7b42a493134 --- /dev/null +++ b/configs/config_3.57G_dp4_tp2_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp4_tp2_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + 
make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp4_tp2_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp4_tp2_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cceadeed2e619693af654fe38f64985db703ba6b --- /dev/null +++ b/configs/config_3.57G_dp4_tp2_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp4_tp2_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + 
tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp4_tp8_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp4_tp8_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aea8b2746f4f2155ba423c3d6eb2943640ff599f --- /dev/null +++ b/configs/config_3.57G_dp4_tp8_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp4_tp8_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp8_tp16_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp8_tp16_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a3cdb2e4cf5a810f3f87a0418d8727ccfd54bcd0 --- /dev/null +++ b/configs/config_3.57G_dp8_tp16_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: 
benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp8_tp16_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp16_tp4_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp16_tp4_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2e349a53d746add2e4e21a1d3d0dee31b9eb2503 --- /dev/null +++ b/configs/config_469G_dp16_tp4_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp16_tp4_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + 
lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp8_tp1_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp8_tp1_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4243a0274fa493b133b8b45b5d72d77d39258698 --- /dev/null +++ b/configs/config_469G_dp8_tp1_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp8_tp1_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_5.5G_dp8_tp1_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml b/configs/config_5.5G_dp8_tp1_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml new file mode 100644 index 0000000000000000000000000000000000000000..016637c8c72932e30efdff809423721ef52752f0 --- /dev/null +++ 
b/configs/config_5.5G_dp8_tp1_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 5.5G_dp8_tp1_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 64 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_78.4G_dp1_tp8_pp1_acc1_mbs32_seq2048_zero0_tpmodeRED_vocab32k.yaml b/configs/config_78.4G_dp1_tp8_pp1_acc1_mbs32_seq2048_zero0_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6e0a646383d78761cb2880d0073a79b776dd0e7a --- /dev/null +++ b/configs/config_78.4G_dp1_tp8_pp1_acc1_mbs32_seq2048_zero0_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 78.4G_dp1_tp8_pp1_acc1_mbs32_seq2048_zero0_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 
2048 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp16_tp1_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp16_tp1_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9d52b2d9936be350429cfe3dde0cd94f35f312a --- /dev/null +++ b/configs/config_8.86G_dp16_tp1_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp16_tp1_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + 
micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp1_tp16_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp1_tp16_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..585d108c898745ab57b64d6956c518d9eb0a7edc --- /dev/null +++ b/configs/config_8.86G_dp1_tp16_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp1_tp16_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp2_tp1_pp32_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp2_tp1_pp32_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..949d74cb3bc3473da584b36d5c0f3c5beb88941f --- /dev/null +++ b/configs/config_8.86G_dp2_tp1_pp32_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp2_tp1_pp32_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 
42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 32 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp2_tp2_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp2_tp2_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2e7de55e2ee5635feb8f5b040f5a29c921ff0d5b --- /dev/null +++ b/configs/config_8.86G_dp2_tp2_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp2_tp2_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + 
weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp2_tp32_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp2_tp32_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f9bbdfeffd5a223c35e82cfd283f1d11fa6c6feb --- /dev/null +++ b/configs/config_8.86G_dp2_tp32_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp2_tp32_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp16_tp2_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp16_tp2_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b6598e9da952787c86ea6f50c8bb448ca1cd59ff --- /dev/null +++ b/configs/config_80G_dp16_tp2_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + 
resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp16_tp2_pp2_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp1_tp8_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp1_tp8_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..399219e366cffd39037d08553ff255d7ffc80bd8 --- /dev/null +++ b/configs/config_80G_dp1_tp8_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp1_tp8_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + 
vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp1_tp8_pp4_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp1_tp8_pp4_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c4fa2aa79b343f6891b0cbc6aa7da8a0ccd6f6c1 --- /dev/null +++ b/configs/config_80G_dp1_tp8_pp4_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp1_tp8_pp4_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp2_tp16_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml 
b/configs/config_80G_dp2_tp16_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3b0f2e10add940d8ba933ca7a08a132262cd5147 --- /dev/null +++ b/configs/config_80G_dp2_tp16_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp2_tp16_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp2_tp2_pp32_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp2_tp2_pp32_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ea862872889a601858dcfafded291e3f69dc814 --- /dev/null +++ b/configs/config_80G_dp2_tp2_pp32_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp2_tp2_pp32_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + 
make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 32 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp2_tp4_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp2_tp4_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..09f95be5edba9800328c50046e61327022e97335 --- /dev/null +++ b/configs/config_80G_dp2_tp4_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp2_tp4_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + 
tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0667ddf8b1d351fbe8dd912b037a86a874ee19b5 --- /dev/null +++ b/configs/config_80G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp4_tp4_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp4_tp4_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dde2afa065c831b8cde3415ac119430ab2b0d7eb --- /dev/null +++ b/configs/config_80G_dp4_tp4_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: 
benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp4_tp4_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp4_tp4_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp4_tp4_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c20040890d2b937004df81b57aabb9d169d965b --- /dev/null +++ b/configs/config_80G_dp4_tp4_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp4_tp4_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + 
lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp8_tp8_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_gqa8.yaml b/configs/config_80G_dp8_tp8_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_gqa8.yaml new file mode 100644 index 0000000000000000000000000000000000000000..300489c7ac4b80b14555b1e8b8e59eae1819f48f --- /dev/null +++ b/configs/config_80G_dp8_tp8_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_gqa8.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp8_tp8_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_gqa8 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100
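Note on the configs above: they all instantiate the same Llama-style "80G" model (hidden_size 8192, 80 layers, intermediate_size 28672, 64 attention heads, vocab 131072) and vary only the parallelism layout, micro-batch size, gradient accumulation, ZeRO stage, and, in the gqa8 variant, the number of KV heads. Every throughput-relevant quantity is recoverable from a handful of YAML fields. Below is a minimal sanity-check sketch of that arithmetic, assuming PyYAML and the field layout shown in these diffs; the summarize helper and its assertions are illustrative, not part of the benchmark harness.

    # Hypothetical helper (not part of this diff): derives the quantities
    # encoded in each config's run name from its YAML fields.
    import yaml

    def summarize(path):
        with open(path) as f:
            cfg = yaml.safe_load(f)
        par = cfg["parallelism"]
        tok = cfg["tokens"]
        mc = cfg["model"]["model_config"]

        # GPUs required for one replica sweep: dp * tp * pp
        world = par["dp"] * par["tp"] * par["pp"]
        # Global batch (sequences per optimizer step):
        # dp * micro_batch_size * batch_accumulation_per_replica
        gbs = (par["dp"] * tok["micro_batch_size"]
               * tok["batch_accumulation_per_replica"])
        tokens_per_step = gbs * tok["sequence_length"]
        # GQA ratio: query heads per KV head (1 would be plain MHA)
        gqa = mc["num_attention_heads"] // mc["num_key_value_heads"]

        # KV heads must divide evenly across tensor-parallel ranks
        assert mc["num_key_value_heads"] % par["tp"] == 0
        return dict(world_size=world, global_batch_size=gbs,
                    tokens_per_step=tokens_per_step, gqa_ratio=gqa)

    # e.g. config_80G_dp8_tp8_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_gqa8.yaml
    # -> world_size=128, global_batch_size=256,
    #    tokens_per_step=1048576, gqa_ratio=8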