diff --git a/scripts/run_1.14G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..62634c74828e55bdd5faa3d4290333e826d0b31b --- /dev/null +++ b/scripts/run_1.14G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp16_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp16_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..866657fce8ed78795fe08833f0f558ec61d37865 --- /dev/null +++ b/scripts/run_1.14G_dp16_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source 
/etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp16_tp2_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp16_tp2_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..18a07381294700945df7d378c209273e2e55a651 --- /dev/null +++ b/scripts/run_1.14G_dp16_tp2_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp2_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + 
+# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp2_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp128_pp1_acc2_mbs32_seq32768_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp2_tp128_pp1_acc2_mbs32_seq32768_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..736926efdd576718396cedb3ac7474cf67a100f4 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp128_pp1_acc2_mbs32_seq32768_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp128_pp1_acc2_mbs32_seq32768_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp128_pp1_acc2_mbs32_seq32768_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp128_pp1_acc64_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp2_tp128_pp1_acc64_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a4676cae8e8b5b3d02d56efd6fdfee8c4fc3e86d --- /dev/null +++ b/scripts/run_1.14G_dp2_tp128_pp1_acc64_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash 
+ +#SBATCH --job-name=bench_1.14G_dp2_tp128_pp1_acc64_mbs4_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp128_pp1_acc64_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp32_pp1_acc256_mbs4_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp2_tp32_pp1_acc256_mbs4_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8bb99d89680a37216559fa4edaf255db883bd085 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp32_pp1_acc256_mbs4_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp32_pp1_acc256_mbs4_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames 
$SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp32_pp1_acc256_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp8_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp2_tp8_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..e006d379966f6e0ccf8b19b691f0a831b48529f1 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp8_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp8_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + 
--rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp8_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp32_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..1885031c890433f5dc77bf549c6abbf300f38740 --- /dev/null +++ b/scripts/run_1.14G_dp32_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp2_pp1_acc1_mbs16_seq2048_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp32_tp2_pp1_acc1_mbs16_seq2048_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..5561c8e2da0754eaf43d58ff1fcc5c8abc13a0a5 --- /dev/null +++ b/scripts/run_1.14G_dp32_tp2_pp1_acc1_mbs16_seq2048_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp2_pp1_acc1_mbs16_seq2048_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH 
--gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp2_pp1_acc1_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp2_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp32_tp2_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..e47b0363feafabfa8b9a39df1571ba8467f4834d --- /dev/null +++ b/scripts/run_1.14G_dp32_tp2_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp2_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# 
export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp2_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp128_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp128_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..d6f4faa641c00a7e77f2536c5462f9e2da0c6139 --- /dev/null +++ b/scripts/run_1.14G_dp4_tp128_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp128_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp128_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp16_pp1_acc4_mbs32_seq2048_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp4_tp16_pp1_acc4_mbs32_seq2048_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 
0000000000000000000000000000000000000000..14e8fd9b2fecbc4613b5128bc420f79540617a72 --- /dev/null +++ b/scripts/run_1.14G_dp4_tp16_pp1_acc4_mbs32_seq2048_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp16_pp1_acc4_mbs32_seq2048_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp16_pp1_acc4_mbs32_seq2048_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp4_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..ab1a5ea25794d84dcf57ab5022aaa8c164a78285 --- /dev/null +++ b/scripts/run_1.14G_dp4_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export 
PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp2_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp4_tp2_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..7d60faa2a8bb4f2329169871b410e2ab4c1c9d21 --- /dev/null +++ b/scripts/run_1.14G_dp4_tp2_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp2_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo 
"World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp64_tp1_pp2_acc2_mbs1_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp64_tp1_pp2_acc2_mbs1_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..0ca5e0eb569734e21aa079150a7d17e7c112ed50 --- /dev/null +++ b/scripts/run_1.14G_dp64_tp1_pp2_acc2_mbs1_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp64_tp1_pp2_acc2_mbs1_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp64_tp1_pp2_acc2_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp2_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp8_tp2_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..637376fc12cf3b6b7215c9ced8f21cabb412f233 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp2_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp2_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o 
/fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp2_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp2_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp8_tp2_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..ec7392d402c4d563a52328ba4ebd891c4114fce6 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp2_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp2_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some 
environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp2_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp32_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp8_tp32_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..2ad614854f39a773008d8069bc7607c4c91aa51a --- /dev/null +++ b/scripts/run_1.14G_dp8_tp32_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp32_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp32_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git 
a/scripts/run_1.14G_dp8_tp4_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp8_tp4_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b582291f837330eeea523a202431243431872904 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp4_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp4_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp4_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp64_pp1_acc16_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp8_tp64_pp1_acc16_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..9ffc38fe799957919b095289c84c13739d39385e --- /dev/null +++ b/scripts/run_1.14G_dp8_tp64_pp1_acc16_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp64_pp1_acc16_mbs4_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for 
some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp64_pp1_acc16_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.34G_dp16_tp1_pp4_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp1_pp4_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b8e8349b72550875fd75d4e1a6acac9dde4e8bad --- /dev/null +++ b/scripts/run_1.34G_dp16_tp1_pp4_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp16_tp1_pp4_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp16_tp1_pp4_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp1_pp4_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp1_pp4_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp16_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..3f48b21116de7d7ded538bc055f8fb0591fed319 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp2_pp2_acc1_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp2_pp2_acc1_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..94a98b5f98aa1fe2cd6d1e5dcc12ff09283c37e9 --- /dev/null +++ 
b/scripts/run_1.34G_dp16_tp2_pp2_acc1_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp16_tp2_pp2_acc1_mbs8_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp16_tp2_pp2_acc1_mbs8_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + 
/fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp2_pp2_acc1_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! + echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp2_pp2_acc1_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp16_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c8230af2134506a909947f77532bd3c8dad49b05 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git 
a/scripts/run_1.34G_dp16_tp4_pp1_acc8_mbs4_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp4_pp1_acc8_mbs4_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..1aa2e4ccda2c031f7ebe1ca34c73cce48a2cd7e8 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp4_pp1_acc8_mbs4_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp4_pp1_acc8_mbs4_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp4_pp1_acc8_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4290a8799798d91244768d0551e60ceaaf34a608 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 
run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
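+# (To confirm which transport NCCL actually picked, one option is to set
+# NCCL_DEBUG=INFO above and grep the resulting log, e.g. in salloc mode:
+# grep "NCCL INFO" $OUTPUT_FILE | grep -iE "via|net/" | head)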
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled; 1 disables it
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled; 1 disables it
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes the hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+ echo "=== GPU Topology ==="
+ nvidia-smi topo -m
+ echo "=================="
+ export SRUN_ALLOC_ARGS=""
+else
+ export JOBNAME="bench_1.34G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k"
+ export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+ export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in the background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+ srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ --max_restarts 0 \
+ --rdzv_conf timeout=60 \
+ /fsx/nouamane/projects/nanotron/run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+ # Store the process ID
+ SRUN_PID=$!
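+ # $SRUN_PID is the PID of the backgrounded srun launcher on this host (not the
+ # remote training ranks). One way to block until it finishes and recover its exit
+ # code is: wait $SRUN_PID; echo "srun exited with status $?"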
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp1_tp4_pp4_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp1_tp4_pp4_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..2b53473e2b8f30e93d8ef4083fa98822faea164f --- /dev/null +++ b/scripts/run_1.34G_dp1_tp4_pp4_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp1_tp4_pp4_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
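+# (To confirm which transport NCCL actually picked, one option is to set
+# NCCL_DEBUG=INFO above and grep the resulting log, e.g. in salloc mode:
+# grep "NCCL INFO" $OUTPUT_FILE | grep -iE "via|net/" | head)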
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled; 1 disables it
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled; 1 disables it
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes the hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+ echo "=== GPU Topology ==="
+ nvidia-smi topo -m
+ echo "=================="
+ export SRUN_ALLOC_ARGS=""
+else
+ export JOBNAME="bench_1.34G_dp1_tp4_pp4_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k"
+ export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+ export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in the background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+ srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ --max_restarts 0 \
+ --rdzv_conf timeout=60 \
+ /fsx/nouamane/projects/nanotron/run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp1_tp4_pp4_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+ # Store the process ID
+ SRUN_PID=$!
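+ # $SRUN_PID is the PID of the backgrounded srun launcher on this host (not the
+ # remote training ranks). One way to block until it finishes and recover its exit
+ # code is: wait $SRUN_PID; echo "srun exited with status $?"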
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp4_pp4_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp2_tp16_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp16_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..6bc568b8ebba5aae11c95817454f4cb5def829af --- /dev/null +++ b/scripts/run_1.34G_dp2_tp16_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp16_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp16_pp1_acc256_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp16_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp16_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..ba532d842d7ae5bef324ffb3a03299c3e81ebb67 --- /dev/null 
+++ b/scripts/run_1.34G_dp2_tp16_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp16_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp16_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp16_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp16_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..56c7456901da37fd6bfcc3c3f9921afeabb01a6e --- /dev/null +++ b/scripts/run_1.34G_dp2_tp16_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp16_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export 
NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp16_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp32_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp32_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4a5849a4847bdf365a22b84591d3348241db5288 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp32_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp32_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + 
--nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp32_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..991149323e3c0a09696a167b960b95f9fdb8a658 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp8_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp8_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..2e0c5f30ffe81f4bef86dc60c47019a8135c6815 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp8_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp2_tp8_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) 
+#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
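+# (To confirm which transport NCCL actually picked, one option is to set
+# NCCL_DEBUG=INFO above and grep the resulting log, e.g. in salloc mode:
+# grep "NCCL INFO" $OUTPUT_FILE | grep -iE "via|net/" | head)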
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled; 1 disables it
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled; 1 disables it
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes the hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+ echo "=== GPU Topology ==="
+ nvidia-smi topo -m
+ echo "=================="
+ export SRUN_ALLOC_ARGS=""
+else
+ export JOBNAME="bench_1.34G_dp2_tp8_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k"
+ export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+ export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in the background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+ srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ --max_restarts 0 \
+ --rdzv_conf timeout=60 \
+ /fsx/nouamane/projects/nanotron/run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp2_tp8_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+ # Store the process ID
+ SRUN_PID=$!
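+ # $SRUN_PID is the PID of the backgrounded srun launcher on this host (not the
+ # remote training ranks). One way to block until it finishes and recover its exit
+ # code is: wait $SRUN_PID; echo "srun exited with status $?"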
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp8_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..0ca6e9107888c6c1363f1754ca298c235c184d84 --- /dev/null +++ b/scripts/run_1.34G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp128_pp1_acc8_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp128_pp1_acc8_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..112425c474b522edfe8b05355f22d16379f81bb7 --- /dev/null +++ 
b/scripts/run_1.34G_dp4_tp128_pp1_acc8_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp128_pp1_acc8_mbs16_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp128_pp1_acc8_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp2_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp2_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..af6d58f96afabcdfa7a88eb8fc54f9439cb7964b --- /dev/null +++ b/scripts/run_1.34G_dp4_tp2_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp2_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export 
NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp2_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..760d79fce6ff4aab1f25fc896dac8ebb6b67e9bb --- /dev/null +++ b/scripts/run_1.34G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + 
--nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp2_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp2_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a75f56682f32cb98c9a0290de971469e9a166630 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp2_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp2_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp2_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp4_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp4_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..13851b301fc37a8d5334542d732c73f4c8c3d2a3 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp4_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp4_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod 
+#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp4_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..182021c23a15dc87fc3b9b534f453c0db6344314 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export 
WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp64_tp8_pp1_acc1_mbs8_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp64_tp8_pp1_acc1_mbs8_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4535749cdcb454aeff0cf9df4c9a68e6527f2b39 --- /dev/null +++ b/scripts/run_1.34G_dp64_tp8_pp1_acc1_mbs8_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp64_tp8_pp1_acc1_mbs8_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file 
benchmark/configs/config_1.34G_dp64_tp8_pp1_acc1_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp1_pp1_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp8_tp1_pp1_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f99ab1502a94e565f54b1b1ec3f02993c9c0f0e9 --- /dev/null +++ b/scripts/run_1.34G_dp8_tp1_pp1_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp1_pp1_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp1_pp1_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp1_pp2_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp8_tp1_pp2_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..63a0ea9d5ffa5e4e9acb268b95551316849fa8df --- /dev/null +++ b/scripts/run_1.34G_dp8_tp1_pp2_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp1_pp2_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of 
nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp1_pp2_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp4_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp8_tp4_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..e083a2e547fa9cfbca1e63c6d4e0b652ea9cebe4 --- /dev/null +++ b/scripts/run_1.34G_dp8_tp4_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp4_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these 
EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp4_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp8_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp8_tp8_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..53de201d1b031c70a171fb05243b861e07aa129e --- /dev/null +++ b/scripts/run_1.34G_dp8_tp8_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp8_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp8_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp8_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp8_tp8_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..509cf1f5a57140830be4f25e1320c436c983ac34 --- /dev/null +++ 
b/scripts/run_1.34G_dp8_tp8_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp8_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp8_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_3.57G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..e4e3dbe6eab221073253efeebf5a7ec0c9d40a4d --- /dev/null +++ b/scripts/run_3.57G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default 
SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
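+    # Optionally, one could block on the background srun and surface its exit code,
+    # e.g. (sketch, using the SRUN_PID and OUTPUT_FILE variables defined above):
+    #   wait $SRUN_PID
+    #   echo "srun exited with status $?" | tee -a $OUTPUT_FILE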
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp2_tp4_pp1_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp2_tp4_pp1_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8aa7ebae2ceef990867787ddf94ffa4ed7609595 --- /dev/null +++ b/scripts/run_3.57G_dp2_tp4_pp1_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp2_tp4_pp1_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp2_tp4_pp1_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp4_pp1_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp4_pp1_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..7ba801dd0bcd679e5b16627b74e9f7876d737292 --- /dev/null +++ b/scripts/run_3.57G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..02ca4f1569bcf1306ee977e60691a314130dfb45 --- /dev/null +++ b/scripts/run_3.57G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
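+    # Note that in salloc mode all training output is redirected to $OUTPUT_FILE rather
+    # than the terminal, so progress can be followed with e.g.:
+    #   tail -f $OUTPUT_FILE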
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp4_tp8_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp4_tp8_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..394560405a7a8e362143e6a04d2f813c99cafb7b --- /dev/null +++ b/scripts/run_3.57G_dp4_tp8_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp4_tp8_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp4_tp8_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp4_tp8_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp4_tp8_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_37.8G_dp4_tp2_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.sh b/scripts/run_37.8G_dp4_tp2_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.sh new file mode 100644 index 0000000000000000000000000000000000000000..ef430cb049238cc1b9c3f2abc80e7860c82e5c98 --- /dev/null +++ b/scripts/run_37.8G_dp4_tp2_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_37.8G_dp4_tp2_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_37.8G_dp4_tp2_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml diff --git a/scripts/run_469G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 
0000000000000000000000000000000000000000..811c1734289ce7625ced9d8e10c4f892a581511d --- /dev/null +++ b/scripts/run_469G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_469G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp16_tp4_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp2_tp16_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp2_tp16_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..44e7d6e0704829df6840037ce2626804e30816e6 --- /dev/null +++ b/scripts/run_469G_dp2_tp16_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp2_tp16_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_469G_dp2_tp16_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp2_tp16_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
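+    # If needed, the run can also be stopped at the scheduler level (this cancels the
+    # whole allocation, not just this srun step), e.g.:
+    #   scancel $SLURM_JOB_ID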
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp2_tp16_pp2_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp4_tp16_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp4_tp16_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..831ffc1ea06bf85dfc357b374163cc0714343dba --- /dev/null +++ b/scripts/run_469G_dp4_tp16_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp4_tp16_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes the hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information (sbatch mode) or set up srun allocation args (salloc mode)
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_469G_dp4_tp16_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun (in the background when launched from an salloc session)
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_469G_dp4_tp16_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp16_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp8_tp4_pp2_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp8_tp4_pp2_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..6dd5c25c76697693f69a4c231c4b122bd356e5ea --- /dev/null +++ b/scripts/run_469G_dp8_tp4_pp2_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_469G_dp8_tp4_pp2_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp8_tp4_pp2_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_8.05G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_vocab32k.sh b/scripts/run_8.05G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_vocab32k.sh 
new file mode 100644 index 0000000000000000000000000000000000000000..a095e40a4118a1ff494f77e574922be64974bfbc --- /dev/null +++ b/scripts/run_8.05G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_8.05G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_8.05G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_8.86G_dp128_tp1_pp1_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp128_tp1_pp1_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..6b3b20bc4157eb698c2fac6701ba2dcbb3910556 --- /dev/null +++ b/scripts/run_8.86G_dp128_tp1_pp1_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp128_tp1_pp1_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ 
-z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp128_tp1_pp1_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp128_tp1_pp1_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp128_tp1_pp1_acc1_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp1_tp1_pp32_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp1_tp1_pp32_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..3bd4c4830287a247cb6e13453880d8bce1864855 --- /dev/null +++ b/scripts/run_8.86G_dp1_tp1_pp32_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_8.86G_dp1_tp1_pp32_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp1_tp1_pp32_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_8.86G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh 
b/scripts/run_8.86G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..cb8bffea5dc097de87e02f6777471d1e518c288f --- /dev/null +++ b/scripts/run_8.86G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes the hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information (sbatch mode) or set up srun allocation args (salloc mode)
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun (in the background when launched from an salloc session)
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp1_tp2_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache.sh b/scripts/run_8.86G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache.sh new file mode 100644 index 0000000000000000000000000000000000000000..eb14e0aee07b9a6fe34e401e7c72291c13258580 --- /dev/null +++ b/scripts/run_8.86G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes the hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information (sbatch mode) or set up srun allocation args (salloc mode)
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun (in the background when launched from an salloc session)
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml +fi diff --git a/scripts/run_8.86G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..bce60b6dea51f358e04f6c071934321938b77163 --- /dev/null +++ b/scripts/run_8.86G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes the hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information (sbatch mode) or set up srun allocation args (salloc mode)
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun (in the background when launched from an salloc session)
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
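+    # Optionally, check that the srun step is actually running on the allocation:
+    #   squeue -j $SLURM_JOB_ID --steps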
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp2_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp2_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..fc9d4748916816b81ad4cbd51539732d83701e6d --- /dev/null +++ b/scripts/run_8.86G_dp2_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp2_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp2_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp32_tp2_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp32_tp2_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..5beaec95edf915c94bfd306b097656d5ba791a6a --- /dev/null +++ b/scripts/run_8.86G_dp32_tp2_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp32_tp2_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=normal + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes the hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information (sbatch mode) or set up srun allocation args (salloc mode)
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp32_tp2_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun (in the background when launched from an salloc session)
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp32_tp2_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp32_tp2_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..dbea53442d81db3f3001ed463a98e4b8f15b2e52 --- /dev/null +++ b/scripts/run_8.86G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes the hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information (sbatch mode) or set up srun allocation args (salloc mode)
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun (in the background when launched from an salloc session)
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
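+    # Optionally, to stop the run and release the whole allocation (not just this srun step):
+    #   scancel $SLURM_JOB_ID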
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp32_tp4_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp32_tp4_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..d90ac6f6d9b055d2d31e975d7084ebc99f1e6210 --- /dev/null +++ b/scripts/run_8.86G_dp32_tp4_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp32_tp4_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes the hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information (sbatch mode) or set up srun allocation args (salloc mode)
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp32_tp4_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun (in the background when launched from an salloc session)
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp32_tp4_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp32_tp4_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp32_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp32_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c34c68f4d29f68ed0f7ce9577eff3b4e825c64f7 --- /dev/null +++ b/scripts/run_8.86G_dp32_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp32_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=normal + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp32_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp32_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp32_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..abe9c74ce71c37b10a6bd81ac26cbe2dd5442ae3 --- /dev/null +++ b/scripts/run_8.86G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
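+    # Optionally, make this wrapper block until training finishes and pick up
+    # the background srun's exit status:
+    #   wait $SRUN_PID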
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c827d29c4b7a931717346844c7e3f736f4cbeba6 --- /dev/null +++ b/scripts/run_8.86G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..bca1d8ef1b49c018a4fd6f0b9836defb2da83e37 --- /dev/null +++ b/scripts/run_8.86G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
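+    # Optionally, confirm the job step is actually running on the allocation:
+    #   squeue -j $SLURM_JOB_ID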
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..88446613686706651302e98ae347e1ce91f063cc --- /dev/null +++ b/scripts/run_8.86G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp16_tp8_pp1_acc8_mbs2_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp16_tp8_pp1_acc8_mbs2_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..ec67cc2358a6e1f31458e794733cd4977a4035e6 --- /dev/null +++ b/scripts/run_80G_dp16_tp8_pp1_acc8_mbs2_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_80G_dp16_tp8_pp1_acc8_mbs2_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_80G_dp16_tp8_pp1_acc8_mbs2_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp16_tp8_pp1_acc8_mbs2_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
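+    # Note: in this salloc branch, "scancel $SLURM_JOB_ID" would cancel the whole
+    # allocation, not just this training step; use "kill $SRUN_PID" (below) to stop
+    # only the launcher.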
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp16_tp8_pp1_acc8_mbs2_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp1_tp8_pp1_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp1_tp8_pp1_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..9757beeb5a744ceec0ae28893b4ec1d04ab68169 --- /dev/null +++ b/scripts/run_80G_dp1_tp8_pp1_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_80G_dp1_tp8_pp1_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp1_tp8_pp1_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_80G_dp4_tp1_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp4_tp1_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.sh new 
file mode 100644 index 0000000000000000000000000000000000000000..8750cc4014555dd6cb1b6859e3a7a40addabbe0d --- /dev/null +++ b/scripts/run_80G_dp4_tp1_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_80G_dp4_tp1_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_80G_dp4_tp1_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp4_tp1_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp4_tp1_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp8_tp16_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp8_tp16_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a483ab5eef07eb7d278c2a6180dbf074cd3bd388 --- /dev/null +++ b/scripts/run_80G_dp8_tp16_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_80G_dp8_tp16_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_80G_dp8_tp16_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp8_tp16_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! + echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp8_tp16_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi
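Usage sketch (paths and node counts taken from the scripts' own #SBATCH headers and "run using" comments; the salloc flags shown are an example, not prescribed by the scripts): each script can be submitted directly with sbatch, or driven from a live interactive allocation by exporting SALLOC_JOBID and NNODES.

    # sbatch mode
    sbatch scripts/run_80G_dp8_tp16_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh

    # salloc mode: reuse an existing allocation, e.g. one obtained with
    #   salloc --partition=hopper-prod --nodes=16 --gres=gpu:8 --exclusive --time=01:10:00
    SALLOC_JOBID=13482276 NNODES=16 bash scripts/run_80G_dp8_tp16_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh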