diff --git a/configs/config_1.14G_dp128_tp2_pp1_acc4_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp128_tp2_pp1_acc4_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..759b01d5c7811c4cfafe3e771eb39d82f7be2c84 --- /dev/null +++ b/configs/config_1.14G_dp128_tp2_pp1_acc4_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp128_tp2_pp1_acc4_mbs1_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 128 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp16_tp4_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp16_tp4_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5fa1ae44099597c953c6df79502c987932cb3dcc --- /dev/null +++ b/configs/config_1.14G_dp16_tp4_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp16_tp4_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: 
info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp256_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp256_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..017cd8a23611d60859a72d8575c9cd5fea4bf750 --- /dev/null +++ b/configs/config_1.14G_dp2_tp256_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp256_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 256 + 
tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp256_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp256_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..323fdd91f491f1e4b38705f17b74f9cc9ddbfbb2 --- /dev/null +++ b/configs/config_1.14G_dp2_tp256_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp256_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 256 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp32_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp32_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9aa2fa9acb7b045e0909194b3ce3d106da33734e --- /dev/null +++ b/configs/config_1.14G_dp2_tp32_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + 
seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp32_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp8_pp1_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp8_pp1_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dca7ed33b19f6d5d69a3895a3df3e0ba43026e97 --- /dev/null +++ b/configs/config_1.14G_dp2_tp8_pp1_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp8_pp1_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + 
lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp32_tp1_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp32_tp1_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f9fa5e2883baed3636cca3007840c80caca6a79 --- /dev/null +++ b/configs/config_1.14G_dp32_tp1_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp32_tp1_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp16_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp16_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..91d4d793c652ad24ed7338c4c15c412e19f6d0cb --- /dev/null +++ b/configs/config_1.14G_dp4_tp16_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp16_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp2_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp2_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fbf247a1101ef76032aa19d67da36e670b436223 --- /dev/null +++ b/configs/config_1.14G_dp4_tp2_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp2_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + 
initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..770bc006b6a900988ca351bdead5b4f5b7d25b0a --- /dev/null +++ b/configs/config_1.14G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + 
batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp64_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp64_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e463f99b691a8a5d1b53cbdfc538530f4607132a --- /dev/null +++ b/configs/config_1.14G_dp4_tp64_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp64_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 64 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp64_tp1_pp1_acc1_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp64_tp1_pp1_acc1_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3f773a912f62d4fed97874040e84c2cdf4630f45 --- /dev/null +++ b/configs/config_1.14G_dp64_tp1_pp1_acc1_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 
1.14G_dp64_tp1_pp1_acc1_mbs8_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp64_tp1_pp1_acc8_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp64_tp1_pp1_acc8_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..631b38fdb70184c682c83a76cf14847682d171a8 --- /dev/null +++ b/configs/config_1.14G_dp64_tp1_pp1_acc8_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp64_tp1_pp1_acc8_mbs1_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 
1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp16_pp1_acc16_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp8_tp16_pp1_acc16_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6d7d88b54914e9d48e0d74daa68aa380044f78ba --- /dev/null +++ b/configs/config_1.14G_dp8_tp16_pp1_acc16_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp16_pp1_acc16_mbs4_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp16_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp8_tp16_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..855a2c693c793211e3fe5508594f6c202195c772 --- /dev/null +++ b/configs/config_1.14G_dp8_tp16_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + 
checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp16_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp16_pp1_acc32_mbs8_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp8_tp16_pp1_acc32_mbs8_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8e9b12037364c32d706000303423fb246a868cbc --- /dev/null +++ b/configs/config_1.14G_dp8_tp16_pp1_acc32_mbs8_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp16_pp1_acc32_mbs8_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + 
tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp2_pp1_acc1_mbs256_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp2_pp1_acc1_mbs256_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c4a9e5104d0b481f2f9efdc75f0ef0eef9ab20f --- /dev/null +++ b/configs/config_1.14G_dp8_tp2_pp1_acc1_mbs256_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp2_pp1_acc1_mbs256_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp2_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml 
b/configs/config_1.14G_dp8_tp2_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75693dca8eb3738e8033e10614c82165560c0f9d --- /dev/null +++ b/configs/config_1.14G_dp8_tp2_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp2_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp1_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp1_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..af3997555f2937e34effa3adf3b59b95cb3fe472 --- /dev/null +++ b/configs/config_1.34G_dp16_tp1_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp1_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + 
make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp1_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp1_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c4708015ab91b57d01edc4a90154db5920cf602 --- /dev/null +++ b/configs/config_1.34G_dp16_tp1_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp1_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + 
tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp8_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp16_tp8_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d9fffded52187166d99e62f011b2678675ee011a --- /dev/null +++ b/configs/config_1.34G_dp16_tp8_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp8_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp1_tp16_pp8_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp1_tp16_pp8_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5d7d72c7f52fe71c494479cb0a8793580dc88b61 --- /dev/null +++ b/configs/config_1.34G_dp1_tp16_pp8_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + 
benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp1_tp16_pp8_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp128_pp1_acc1_mbs256_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp128_pp1_acc1_mbs256_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e59fd056b3192dc70e2183d1393429f7264111af --- /dev/null +++ b/configs/config_1.34G_dp2_tp128_pp1_acc1_mbs256_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp128_pp1_acc1_mbs256_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + 
lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 128 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp256_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp256_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5034cebc67f69ac7649c0098124f53764e339741 --- /dev/null +++ b/configs/config_1.34G_dp2_tp256_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp256_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 256 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp32_pp1_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp32_pp1_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0314073b0beed97df6092d2b1cc7ffb84de8c30f --- /dev/null +++ 
b/configs/config_1.34G_dp2_tp32_pp1_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp32_pp1_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp1_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp32_tp1_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f8ed98bb626bad74373ea8d0f7db018537556fd --- /dev/null +++ b/configs/config_1.34G_dp32_tp1_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp1_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + 
max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp1_pp2_acc2_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp32_tp1_pp2_acc2_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..61351fe94afa849e1fec487d2b0f4ee962cad796 --- /dev/null +++ b/configs/config_1.34G_dp32_tp1_pp2_acc2_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp1_pp2_acc2_mbs8_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + 
limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp4_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp32_tp4_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..76f138eff916c4759494bf6a711746467010acbf --- /dev/null +++ b/configs/config_1.34G_dp32_tp4_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp4_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp128_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp128_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..da20a9910df744b13406d5ddb297dc3eb90b4bd1 --- /dev/null +++ b/configs/config_1.34G_dp4_tp128_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 
1.34G_dp4_tp128_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 128 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..92cadc7b90f8177d3fb6a3acdd56e253731839a8 --- /dev/null +++ b/configs/config_1.34G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 
0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp4_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp4_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0cec394f7dcc22cc775e58102da8a732962ccc79 --- /dev/null +++ b/configs/config_1.34G_dp4_tp4_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp4_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp8_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp8_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c0e75ea821f0633951ab756a3e66a19bbaa97534 --- /dev/null +++ b/configs/config_1.34G_dp4_tp8_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: 
checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp8_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp64_tp2_pp1_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp64_tp2_pp1_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08b574002dab4d7d087932c55b51e4db801da8ea --- /dev/null +++ b/configs/config_1.34G_dp64_tp2_pp1_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp64_tp2_pp1_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + 
rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp64_tp4_pp1_acc16_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp64_tp4_pp1_acc16_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26129bf4fcb68d3960be0195ad8c25440d3358b9 --- /dev/null +++ b/configs/config_1.34G_dp64_tp4_pp1_acc16_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp64_tp4_pp1_acc16_mbs2_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git 
a/configs/config_1.34G_dp8_tp16_pp1_acc1_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp16_pp1_acc1_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08401a35c08025e9caf132debf45b48e8cf2c24d --- /dev/null +++ b/configs/config_1.34G_dp8_tp16_pp1_acc1_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp16_pp1_acc1_mbs32_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp16_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp16_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e553dad907524d8af9e7bc98ffbb2bffc8516d4 --- /dev/null +++ b/configs/config_1.34G_dp8_tp16_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp16_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + 
log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp16_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp16_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..513324f199cc90611da9e9fc858fbd6b2e17a0a3 --- /dev/null +++ b/configs/config_1.34G_dp8_tp16_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp16_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + 
tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp2_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp2_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fbce06a4a7891fd0d32ece42f261821e92342c12 --- /dev/null +++ b/configs/config_1.34G_dp8_tp2_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp2_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp64_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp64_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f6b22b9136cd5c833a55c86867a26c91505d3fe4 --- /dev/null +++ b/configs/config_1.34G_dp8_tp64_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + 
num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp64_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 64 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp8_pp1_acc32_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp8_pp1_acc32_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b2bb1da2f6e0ccf280f28829756e6865c7fe5a55 --- /dev/null +++ b/configs/config_1.34G_dp8_tp8_pp1_acc32_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp8_pp1_acc32_mbs2_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + 
learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp8_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp8_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1e6dde5fbe2a720820a82c9e2162ef863ecd20a6 --- /dev/null +++ b/configs/config_1.34G_dp8_tp8_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp8_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_2.28G_dp16_tp8_pp1_acc1_mbs32_seq2048_zero1_tpmodeRED_l26_h2304_heads16.yaml b/configs/config_2.28G_dp16_tp8_pp1_acc1_mbs32_seq2048_zero1_tpmodeRED_l26_h2304_heads16.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..757bba38d57cb8ff8ad55b5a8f92d767a79e61c3 --- /dev/null +++ b/configs/config_2.28G_dp16_tp8_pp1_acc1_mbs32_seq2048_zero1_tpmodeRED_l26_h2304_heads16.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_tp.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 2.28G_dp16_tp8_pp1_acc1_mbs32_seq2048_zero1_tpmodeRED_l26_h2304_heads16 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2304 + initializer_range: 0.02 + intermediate_size: 9216 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 16 + num_hidden_layers: 26 + num_key_value_heads: 16 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.56G_dp8_tp8_pp1_acc1_mbs16_seq2048_zero0_l28_h3072_heads24.yaml b/configs/config_3.56G_dp8_tp8_pp1_acc1_mbs16_seq2048_zero0_l28_h3072_heads24.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1e425b96fb781cd5460701012dd2969b0ae531aa --- /dev/null +++ b/configs/config_3.56G_dp8_tp8_pp1_acc1_mbs16_seq2048_zero0_l28_h3072_heads24.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_elie.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.56G_dp8_tp8_pp1_acc1_mbs16_seq2048_zero0_l28_h3072_heads24 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + 
intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 24 + num_hidden_layers: 28 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 128256 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp1_tp1_pp8_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp1_tp1_pp8_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8058991403b2f812c213da2a4be5e85e35e39bce --- /dev/null +++ b/configs/config_3.57G_dp1_tp1_pp8_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp1_tp1_pp8_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + 
batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp1_tp2_pp1_acc1_mbs5_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml b/configs/config_3.57G_dp1_tp2_pp1_acc1_mbs5_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b72651b2693f86328e0f4c1bd59291deb2ceda1a --- /dev/null +++ b/configs/config_3.57G_dp1_tp2_pp1_acc1_mbs5_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp1_tp2_pp1_acc1_mbs5_seq4096_zero0_tpmodeRED_vocab131k_cache + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 5 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp2_tp2_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp2_tp2_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0b528499e068f37fd3761cff849ff8d045b9a2fd --- /dev/null +++ b/configs/config_3.57G_dp2_tp2_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: 
debug + run: 3.57G_dp2_tp2_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 32 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp2_tp32_pp4_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp2_tp32_pp4_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e839b5615ad4e725125d4b21f0cbc870b9e3e26c --- /dev/null +++ b/configs/config_3.57G_dp2_tp32_pp4_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp2_tp32_pp4_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + 
adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp32_tp2_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp32_tp2_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..589d32291cefb34025064b3f8ae29c5600ebc42a --- /dev/null +++ b/configs/config_3.57G_dp32_tp2_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp32_tp2_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9adfa0bb73ca27fd0ad234d94e64b597f1feb335 --- /dev/null +++ b/configs/config_3.57G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + 
checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp8_tp1_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp8_tp1_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..49d54fd24e5a05779b6c7e6131d5e0007cbd3635 --- /dev/null +++ b/configs/config_3.57G_dp8_tp1_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp8_tp1_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 
1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.8G_dp1_tp1_pp8_acc7_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml b/configs/config_3.8G_dp1_tp1_pp8_acc7_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b2bde55801f88a9b3a9285db604ecd4f81030d8e --- /dev/null +++ b/configs/config_3.8G_dp1_tp1_pp8_acc7_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.8G_dp1_tp1_pp8_acc7_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 30 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 7 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git 
a/configs/config_466G_dp1_tp8_pp1_acc1_mbs2_seq2048_zero0_tpmodeRED_vocab32k.yaml b/configs/config_466G_dp1_tp8_pp1_acc1_mbs2_seq2048_zero0_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..59e4d58bc4703ca548fc3386374d0bf68f570729 --- /dev/null +++ b/configs/config_466G_dp1_tp8_pp1_acc1_mbs2_seq2048_zero0_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 466G_dp1_tp8_pp1_acc1_mbs2_seq2048_zero0_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp128_tp4_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp128_tp4_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cc045f45c30a8e4885e6eedc606af30cc8b30fc0 --- /dev/null +++ b/configs/config_469G_dp128_tp4_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp128_tp4_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + 
ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 128 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp16_tp2_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp16_tp2_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..43a5860a38cbf2a0593762b8d1b600d00c9904e4 --- /dev/null +++ b/configs/config_469G_dp16_tp2_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp16_tp2_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 2 + 
tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp1_tp4_pp32_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp1_tp4_pp32_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..278500b3f762e6a54d4e672daac4a3e3c48f311e --- /dev/null +++ b/configs/config_469G_dp1_tp4_pp32_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp1_tp4_pp32_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 32 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp2_tp16_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp2_tp16_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d54787fb8927216293ff17f0dd1bb899a5cddf1d --- /dev/null +++ b/configs/config_469G_dp2_tp16_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + 
num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp2_tp16_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6d7975673f8334e066375330617f0b67e79c66c3 --- /dev/null +++ b/configs/config_469G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + 
learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k_gqa8.yaml b/configs/config_469G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k_gqa8.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cccd2e37bd314fb890920928cf6c1923d72f6996 --- /dev/null +++ b/configs/config_469G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k_gqa8.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k_gqa8 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp4_tp2_pp32_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp4_tp2_pp32_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 
100644 index 0000000000000000000000000000000000000000..8b4a1e866b9f4025b39dd71d3d60467ae775e52a --- /dev/null +++ b/configs/config_469G_dp4_tp2_pp32_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp4_tp2_pp32_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 32 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp4_tp8_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp4_tp8_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..70db9bc16b630c31241e2c576d1a461f8d3cdab9 --- /dev/null +++ b/configs/config_469G_dp4_tp8_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp4_tp8_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + 
hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0e8072c5b38db646fdd88c95b50d37d52acdd930 --- /dev/null +++ b/configs/config_469G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + 
tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp8_tp16_pp2_acc1_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp8_tp16_pp2_acc1_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..07f6ae317f4af3cbf4305aba83f627fc2236f24a --- /dev/null +++ b/configs/config_469G_dp8_tp16_pp2_acc1_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp8_tp16_pp2_acc1_mbs32_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_5.5G_dp2_tp4_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml b/configs/config_5.5G_dp2_tp4_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml new file mode 100644 index 0000000000000000000000000000000000000000..428b0c451e0089fefc4897828376aff3bbe49a48 --- /dev/null +++ b/configs/config_5.5G_dp2_tp4_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + 
ignore_sanity_checks: true + project: debug + run: 5.5G_dp2_tp4_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 64 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_5.5G_dp64_tp2_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml b/configs/config_5.5G_dp64_tp2_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9515dda79089d2f7767cd0c70126031d3d248883 --- /dev/null +++ b/configs/config_5.5G_dp64_tp2_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 5.5G_dp64_tp2_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 
1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp4_tp4_pp2_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp4_tp4_pp2_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c6477e9042f63b41a5ca19b320a5f029e809c5b2 --- /dev/null +++ b/configs/config_8.86G_dp4_tp4_pp2_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp4_tp4_pp2_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp64_tp2_pp4_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp64_tp2_pp4_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..122026a3d03ab04ba671b564c68f9d1e63953c03 --- /dev/null +++ b/configs/config_8.86G_dp64_tp2_pp4_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ 
+checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp64_tp2_pp4_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..732f7ca7b1807c8030a7d713f2da25b6ac1e1c62 --- /dev/null +++ b/configs/config_80G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + 
pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 16 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp2_tp2_pp2_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp2_tp2_pp2_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0350b0b7715464c773386bc5dae4ba183c55217 --- /dev/null +++ b/configs/config_80G_dp2_tp2_pp2_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp2_tp2_pp2_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git 
a/configs/config_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f6c89cde8c5ef7e35d29621004e0b1cd5a2b69b8 --- /dev/null +++ b/configs/config_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..94a42121edbcfc56314aa987c20a7fdae3e3dc67 --- /dev/null +++ b/configs/config_80G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + 
ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp32_tp4_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp32_tp4_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..35ee6ddf7e47bcbb2b30f8916cbabc4a266752f9 --- /dev/null +++ b/configs/config_80G_dp32_tp4_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp32_tp4_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 4 + 
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_80G_dp4_tp1_pp16_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp4_tp1_pp16_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9d8fe64c3a51a136de7bc72a2ede3663aa603290
--- /dev/null
+++ b/configs/config_80G_dp4_tp1_pp16_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 80G_dp4_tp1_pp16_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 8192
+    initializer_range: 0.02
+    intermediate_size: 28672
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 64
+    num_hidden_layers: 80
+    num_key_value_heads: 64
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: false
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 16
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 64
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_80G_dp4_tp1_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp4_tp1_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6965be8a5111af5cb02570c8e70bb18542752e7a
--- /dev/null
+++ b/configs/config_80G_dp4_tp1_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 80G_dp4_tp1_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 8192
+    initializer_range: 0.02
+    intermediate_size: 28672
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 64
+    num_hidden_layers: 80
+    num_key_value_heads: 64
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: false
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 4
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 32
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_80G_dp8_tp4_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp8_tp4_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7c72b721e11e1b8231cf04910c79655478adc0c7
--- /dev/null
+++ b/configs/config_80G_dp8_tp4_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 80G_dp8_tp4_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 8192
+    initializer_range: 0.02
+    intermediate_size: 28672
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 64
+    num_hidden_layers: 80
+    num_key_value_heads: 64
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: false
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 8
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_80G_dp8_tp4_pp4_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp8_tp4_pp4_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..91d65ed1b50cfb64d4cf5795cc9f29a963568f1f
--- /dev/null
+++ b/configs/config_80G_dp8_tp4_pp4_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 80G_dp8_tp4_pp4_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 8192
+    initializer_range: 0.02
+    intermediate_size: 28672
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 64
+    num_hidden_layers: 80
+    num_key_value_heads: 64
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: false
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 4
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 32
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
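
Each filename above encodes the batch geometry (dp/tp/pp, grad accumulation, micro-batch size, sequence length) that the YAML body must reproduce. A minimal sanity-check sketch, assuming PyYAML is available and the dpX_tpY_ppZ_accA_mbsB_seqS naming scheme used by these files; the summarize helper below is illustrative, not part of the repository:

import re
import sys

import yaml


def summarize(path: str) -> None:
    with open(path) as f:
        cfg = yaml.safe_load(f)

    par = cfg["parallelism"]
    tok = cfg["tokens"]

    # World size is the product of the three parallelism axes.
    world_size = par["dp"] * par["tp"] * par["pp"]

    # Global batch in sequences: dp replicas, each accumulating
    # batch_accumulation_per_replica micro-batches of micro_batch_size.
    gbs = par["dp"] * tok["micro_batch_size"] * tok["batch_accumulation_per_replica"]
    tokens_per_step = gbs * tok["sequence_length"]

    print(f"{path}: world_size={world_size}, gbs={gbs} seqs, "
          f"tokens/step={tokens_per_step:,}")

    # Cross-check the geometry encoded in the filename against the YAML body.
    m = re.search(r"dp(\d+)_tp(\d+)_pp(\d+)_acc(\d+)_mbs(\d+)_seq(\d+)", path)
    if m:
        dp, tp, pp, acc, mbs, seq = map(int, m.groups())
        assert (dp, tp, pp) == (par["dp"], par["tp"], par["pp"])
        assert acc == tok["batch_accumulation_per_replica"]
        assert mbs == tok["micro_batch_size"]
        assert seq == tok["sequence_length"]


if __name__ == "__main__":
    for p in sys.argv[1:]:
        summarize(p)

For config_80G_dp8_tp4_pp4_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml, for example, this reports a world size of 8 × 4 × 4 = 128 ranks, a global batch of 8 × 1 × 32 = 256 sequences, and 256 × 4096 = 1,048,576 tokens per step.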