diff --git a/scripts/run_1.14G_dp16_tp16_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp16_tp16_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..d09370e0fa93a803dee913c00053fdeb4e473e96 --- /dev/null +++ b/scripts/run_1.14G_dp16_tp16_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp16_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp16_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp16_tp2_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp16_tp2_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4d6a8b799c8860defa75a35b7161880235f735e5 --- /dev/null +++ b/scripts/run_1.14G_dp16_tp2_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp2_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source 
/etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp2_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp16_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp16_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..7d57eec71a62bd58e2204cc922e9bdede5bb0fcf --- /dev/null +++ b/scripts/run_1.14G_dp16_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER 
+ +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp1_pp4_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp2_tp1_pp4_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..95bdf0ef696c6d8db59d87666cb44f30b8470ee8 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp1_pp4_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp1_pp4_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp1_pp4_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp4_pp1_acc128_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp2_tp4_pp1_acc128_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4d4a7607f7548ff225eafba3041bf8e0d2b8e5f4 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp4_pp1_acc128_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH 
--job-name=bench_1.14G_dp2_tp4_pp1_acc128_mbs8_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp4_pp1_acc128_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp4_pp1_acc1_mbs256_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp2_tp4_pp1_acc1_mbs256_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..529e57816c79983640cbdde8a0e9b51356540ed6 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp4_pp1_acc1_mbs256_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp4_pp1_acc1_mbs256_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` 
+export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp4_pp1_acc1_mbs256_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp4_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp2_tp4_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f328f5c9b6a9c9d119e4eeede090c5578f5a98d4 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp4_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp4_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + 
--rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp4_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp8_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp2_tp8_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a59aed241cfd150a7b4d64ff9c3f66fb38c5a820 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp8_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp8_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp8_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp16_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp32_tp16_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..5ec979a076627e660292dfda40460618c32697b7 --- /dev/null +++ b/scripts/run_1.14G_dp32_tp16_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp16_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH 
--gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp16_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp2_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp32_tp2_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..48bd9ecfe02142ac9b9b00661d1dd25340bf5504 --- /dev/null +++ b/scripts/run_1.14G_dp32_tp2_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp2_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to 
tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp2_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp4_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp32_tp4_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f591f2d3c3a4c2e48c2319071efb5f788bac44ce --- /dev/null +++ b/scripts/run_1.14G_dp32_tp4_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp4_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp4_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp8_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp32_tp8_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 
0000000000000000000000000000000000000000..0543fc4379d46a2fb167cadec1c3b6a7fd5aa23c --- /dev/null +++ b/scripts/run_1.14G_dp32_tp8_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp8_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp8_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp128_pp1_acc16_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp128_pp1_acc16_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4981288d6e26111617be265d102d6599ee76753e --- /dev/null +++ b/scripts/run_1.14G_dp4_tp128_pp1_acc16_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp128_pp1_acc16_mbs32_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export 
PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp128_pp1_acc16_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp32_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp4_tp32_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..14142ac98c2f183f2442c7cf44e87ff8f55bddd8 --- /dev/null +++ b/scripts/run_1.14G_dp4_tp32_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp32_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" 
+echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp32_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..bc3a738c3e8c7a48d04a12b817f17fd6a3d1163c --- /dev/null +++ b/scripts/run_1.14G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a5f3daff995740c95f19eac62e7bf38c08095edf --- /dev/null +++ b/scripts/run_1.14G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o 
/fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp4_pp1_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp4_pp1_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..2fe5bb118ba0c43e0112d5a771a3695965a2ef58 --- /dev/null +++ b/scripts/run_1.14G_dp4_tp4_pp1_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp4_pp1_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some 
environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp4_pp1_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp64_tp2_pp1_acc2_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp64_tp2_pp1_acc2_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..82df24d4f6bd77ef178c8972869f52426db6e444 --- /dev/null +++ b/scripts/run_1.14G_dp64_tp2_pp1_acc2_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp64_tp2_pp1_acc2_mbs4_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp64_tp2_pp1_acc2_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git 
a/scripts/run_1.14G_dp8_tp16_pp1_acc16_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp8_tp16_pp1_acc16_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..1610f3dbaf4689981f65edd6f02ace0b583973a3 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp16_pp1_acc16_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp16_pp1_acc16_mbs4_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp16_pp1_acc16_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp16_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp8_tp16_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f59373699c5c2c1b8e05fdf5f977b84768bca8e8 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp16_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp16_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some 
reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp16_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a898f423b6129eff66911b55cbc029aed23817d4 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL 
uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.34G_dp16_tp16_pp2_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp16_pp2_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..0d5c465fb199117068b779e41ee3dfd9dc5257f6 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp16_pp2_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp16_tp16_pp2_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp16_tp16_pp2_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp16_pp2_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp16_pp2_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp16_tp2_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp16_tp2_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..d62de8dfdca5f70895d5bee58c54ae3b1fbd5924 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp2_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp2_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp2_pp1_acc8_mbs4_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp32_pp1_acc16_mbs2_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp32_pp1_acc16_mbs2_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..52fa22366262dd76e2b0f5985c5aca55258e2dff --- /dev/null +++ 
b/scripts/run_1.34G_dp16_tp32_pp1_acc16_mbs2_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp32_pp1_acc16_mbs2_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp32_pp1_acc16_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp1_tp1_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp1_tp1_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..bb37e94148ea3ed5f0234e8ca8967beb371307b9 --- /dev/null +++ b/scripts/run_1.34G_dp1_tp1_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp1_tp1_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment 
variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp1_tp1_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp1_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp1_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp1_tp8_pp16_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp1_tp8_pp16_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..db2bc446387cb9003a1d73aa4c9976f3b759ce35 --- /dev/null +++ b/scripts/run_1.34G_dp1_tp8_pp16_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp1_tp8_pp16_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp1_tp8_pp16_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp8_pp16_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp8_pp16_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp1_tp8_pp16_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp1_tp8_pp16_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..7384b03dcf6ea3491d5a8e18cbfb7e4cd5a4deb5 --- /dev/null +++ b/scripts/run_1.34G_dp1_tp8_pp16_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp1_tp8_pp16_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp1_tp8_pp16_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp8_pp16_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp8_pp16_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp2_tp256_pp1_acc2_mbs128_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp256_pp1_acc2_mbs128_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b4d64da0f4fc11774a6bd7e1496f60893b384614 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp256_pp1_acc2_mbs128_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp256_pp1_acc2_mbs128_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp256_pp1_acc2_mbs128_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp256_pp1_acc64_mbs1_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp256_pp1_acc64_mbs1_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..0e91bde4687f7a88201fbca05e02d3bca17313d6 --- 
/dev/null +++ b/scripts/run_1.34G_dp2_tp256_pp1_acc64_mbs1_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp256_pp1_acc64_mbs1_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp256_pp1_acc64_mbs1_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp32_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp32_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b25e8db5a1a50fe9c04d0745b970d6baa11a713e --- /dev/null +++ b/scripts/run_1.34G_dp2_tp32_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp32_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names 
from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp32_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp32_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp32_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..1763cadb3b4a3cbca47b7fb19696afc6ee01fcb9 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp32_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp32_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun 
torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp32_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp4_pp1_acc8_mbs8_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp4_pp1_acc8_mbs8_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8aca31a41acc0f65caf7c72035688d2f02843a62 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp4_pp1_acc8_mbs8_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp4_pp1_acc8_mbs8_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp4_pp1_acc8_mbs8_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..bb5f703c468b4192c76fd26df1586a8ca7c87daa --- /dev/null +++ b/scripts/run_1.34G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as 
needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp16_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp32_tp8_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp32_tp8_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..e84b247a14c2d590e257002704e9545192131897 --- /dev/null +++ b/scripts/run_1.34G_dp32_tp8_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp32_tp8_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp8_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp1_pp2_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp1_pp2_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..11c59d39780f56e64c41380d245d1b5057eb0e89 --- /dev/null +++ 
b/scripts/run_1.34G_dp4_tp1_pp2_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp1_pp2_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp1_pp2_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp2_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp4_tp2_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..20c8878d23680053e6f5e40a07378d182f6007d9 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp2_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp2_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export 
NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp2_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp64_pp1_acc16_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp64_pp1_acc16_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..cc6c7710ff60c7a17d678f3a2fee5212970f72e9 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp64_pp1_acc16_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp64_pp1_acc16_mbs32_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + 
--nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp64_pp1_acc16_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp8_pp1_acc8_mbs4_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp8_pp1_acc8_mbs4_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c61f33836ab62c24a0687d4e066f90d9cc5cf619 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp8_pp1_acc8_mbs4_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp8_pp1_acc8_mbs4_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp8_pp1_acc8_mbs4_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp64_pp1_acc128_mbs2_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp8_tp64_pp1_acc128_mbs2_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a3807153f47f61902f377e03285f10d5273c3ef1 --- /dev/null +++ b/scripts/run_1.34G_dp8_tp64_pp1_acc128_mbs2_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp64_pp1_acc128_mbs2_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) 
+#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp64_pp1_acc128_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp64_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp8_tp64_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..755a29902168553b16e55850baee5d5a6ca1302f --- /dev/null +++ b/scripts/run_1.34G_dp8_tp64_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp64_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# 
export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp64_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_3.56G_dp1_tp8_pp1_acc1_mbs8_seq2048_zero0_l28_h3072_heads24.sh b/scripts/run_3.56G_dp1_tp8_pp1_acc1_mbs8_seq2048_zero0_l28_h3072_heads24.sh new file mode 100644 index 0000000000000000000000000000000000000000..340d1db92fa3a0d513d75743a86fc3b7f2c6f47c --- /dev/null +++ b/scripts/run_3.56G_dp1_tp8_pp1_acc1_mbs8_seq2048_zero0_l28_h3072_heads24.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +#SBATCH --job-name=bench_3.56G_dp1_tp8_pp1_acc1_mbs8_seq2048_zero0_l28_h3072_heads24 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_3.56G_dp1_tp8_pp1_acc1_mbs8_seq2048_zero0_l28_h3072_heads24.yaml diff --git a/scripts/run_3.57G_dp1_tp16_pp1_acc1_mbs30_seq4096_zero0_tpmodeALL_vocab131k_cache.sh b/scripts/run_3.57G_dp1_tp16_pp1_acc1_mbs30_seq4096_zero0_tpmodeALL_vocab131k_cache.sh new file mode 100644 index 0000000000000000000000000000000000000000..81767d63099e968811ff5ca35dc9e3db3f8b8386 --- /dev/null +++ b/scripts/run_3.57G_dp1_tp16_pp1_acc1_mbs30_seq4096_zero0_tpmodeALL_vocab131k_cache.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH 
--job-name=bench_3.57G_dp1_tp16_pp1_acc1_mbs30_seq4096_zero0_tpmodeALL_vocab131k_cache # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp1_tp16_pp1_acc1_mbs30_seq4096_zero0_tpmodeALL_vocab131k_cache" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp1_tp16_pp1_acc1_mbs30_seq4096_zero0_tpmodeALL_vocab131k_cache.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
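+ # Optional sketch (not in the original script): wait for the backgrounded srun and
+ # report its exit status, assuming SRUN_PID and OUTPUT_FILE are set as above:
+ #   tail --pid=$SRUN_PID -f $OUTPUT_FILE &   # stream the log until srun exits (GNU tail)
+ #   wait $SRUN_PID; echo "srun exited with status $?" | tee -a $OUTPUT_FILE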
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp1_tp16_pp1_acc1_mbs30_seq4096_zero0_tpmodeALL_vocab131k_cache.yaml +fi diff --git a/scripts/run_3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..0c87f1eaa4748cdde7d66db2840dada629d1c623 --- /dev/null +++ b/scripts/run_3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
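+ # Sanity-check sketch (dp/tp/pp taken from the config name, not read from the YAML):
+ # dp * tp * pp should equal WORLD_SIZE; here 32 * 2 * 4 = 256 = 32 nodes x 8 GPUs.
+ #   [ $((32 * 2 * 4)) -eq $WORLD_SIZE ] || echo "WARNING: parallelism does not match WORLD_SIZE"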
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp32_tp2_pp4_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..094b1059cb6591cfc3abb683d406a8413d8595cd --- /dev/null +++ b/scripts/run_3.57G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
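+ # Note on the commented-out NCCL toggles above: NCCL_SHM_DISABLE and NCCL_IB_DISABLE
+ # only disable the SHM / InfiniBand transports when set to 1; the value 0 shown leaves
+ # them enabled. The NCCL_IB_TIMEOUT estimates follow the formula ~4.096us * 2^value,
+ # hence roughly 4s, 8s and 17s for 20, 21 and 22.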
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp4_tp8_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..dc569542932c38215df6f3163f120b2f704a493c --- /dev/null +++ b/scripts/run_3.57G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
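+ # Usage sketch for the salloc path above (the job id is a placeholder, not a real value):
+ #   salloc --nodes=32 --gres=gpu:8 --partition=hopper-prod --qos=high --time=01:10:00
+ #   SALLOC_JOBID=<jobid printed by salloc> NNODES=32 bash scripts/run_3.57G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh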
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp4_tp8_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp64_tp1_pp1_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp64_tp1_pp1_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..3ae2e672c7ac0bc4c437a31cb82c5c527f69f336 --- /dev/null +++ b/scripts/run_3.57G_dp64_tp1_pp1_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp64_tp1_pp1_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=normal + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp64_tp1_pp1_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp64_tp1_pp1_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp64_tp1_pp1_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4d92864447c27dc5598e52a61dd741705ebdc2ad --- /dev/null +++ b/scripts/run_3.57G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp64_tp4_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp8_tp1_pp4_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp8_tp1_pp4_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..26ba84c3bdf18be4209c37860fe570a7cb4e557c --- /dev/null +++ b/scripts/run_3.57G_dp8_tp1_pp4_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_3.57G_dp8_tp1_pp4_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp8_tp1_pp4_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_3.57G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh 
b/scripts/run_3.57G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..cad092645c2c868f07b67da5134dfe7f442acc74 --- /dev/null +++ b/scripts/run_3.57G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp8_tp2_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp8_tp2_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp8_tp2_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..68d389cb8e0a632833e1af0a8ad458a039d197ca --- /dev/null +++ b/scripts/run_3.57G_dp8_tp2_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp8_tp2_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp8_tp2_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp8_tp2_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp8_tp2_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.8G_dp1_tp1_pp4_acc3_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache.sh b/scripts/run_3.8G_dp1_tp1_pp4_acc3_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache.sh new file mode 100644 index 0000000000000000000000000000000000000000..e2c31e78d32adc5e9f4ff293b5055d21ce311814 --- /dev/null +++ b/scripts/run_3.8G_dp1_tp1_pp4_acc3_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.8G_dp1_tp1_pp4_acc3_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=4 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.8G_dp1_tp1_pp4_acc3_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.8G_dp1_tp1_pp4_acc3_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
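+ # Note (inferred from the config name, not stated explicitly): this run needs only
+ # dp1 * tp1 * pp4 = 4 ranks, which is presumably why GPUS_PER_NODE is set to 4 above
+ # even though the allocation requests gpu:8 per node.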
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.8G_dp1_tp1_pp4_acc3_mbs4_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml +fi diff --git a/scripts/run_469G_dp2_tp2_pp16_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp2_tp2_pp16_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..9397c4a429d2412aa3500472c9aef4c2348e1d38 --- /dev/null +++ b/scripts/run_469G_dp2_tp2_pp16_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_469G_dp2_tp2_pp16_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp2_tp2_pp16_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_469G_dp4_tp16_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh 
b/scripts/run_469G_dp4_tp16_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..74bfa7eb6e7b37f243b00312dca69d1971a12d3f --- /dev/null +++ b/scripts/run_469G_dp4_tp16_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp4_tp16_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
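+# A rough sketch of what such a TCP-only sanity check could look like (the
+# test_comm.sh mentioned above is not included in this change; the exports,
+# node counts and config name below are illustrative assumptions):
+# export NCCL_IB_DISABLE=1
+# export NCCL_NET_GDR_LEVEL=LOC
+# export FI_PROVIDER=tcp
+# srun --nodes=2 --gres=gpu:8 torchrun --nnodes=2 --nproc_per_node=8 \
+#     --rdzv_backend=c10d --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+#     /fsx/nouamane/projects/nanotron/run_train.py --config-file <small_debug_config>.yaml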
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_469G_dp4_tp16_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp16_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp16_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp4_tp16_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp4_tp16_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..efb83bd5bfe9467ae543bb9fd24a1fdca7627165 --- /dev/null +++ b/scripts/run_469G_dp4_tp16_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp4_tp16_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_469G_dp4_tp16_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp16_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
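+ # Since the launch runs in the background, the log can be followed from the
+ # same shell; these are standard commands, noted here only as a convenience
+ # and not required by the benchmark itself:
+ # tail -f "$OUTPUT_FILE"        # stream the torchrun/nanotron output
+ # squeue -j "$SLURM_JOB_ID"     # confirm the allocation is still running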
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp16_pp8_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp4_tp1_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp4_tp1_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..12cfed82c5cfde84a5b1a08205ee7cfbc8870392 --- /dev/null +++ b/scripts/run_469G_dp4_tp1_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp4_tp1_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
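+# For reference on the commented NCCL_IB_TIMEOUT values below: NCCL documents
+# the InfiniBand retry timeout as 4.096 us * 2^NCCL_IB_TIMEOUT, so
+#   2^20 * 4.096 us ~= 4.3 s,  2^21 ~= 8.6 s,  2^22 ~= 17.2 s,
+# which is where the "~4 / ~8 / ~16 seconds" figures come from.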
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_469G_dp4_tp1_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp1_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp1_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c9abc4fcf2d332a8952e6e15dacd4638259e9e7d --- /dev/null +++ b/scripts/run_469G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_469G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_469G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh 
b/scripts/run_469G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..972a76518e663352a894ed78815f5fedd584eda1 --- /dev/null +++ b/scripts/run_469G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_469G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
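+ # Sanity check of the effective batch size encoded in this config name,
+ # assuming the usual nanotron convention GBS = dp * grad_acc * micro_batch:
+ # echo $((4 * 1 * 64))          # 256 sequences per optimizer step
+ # echo $((4 * 1 * 64 * 4096))   # 1,048,576 tokens per step at seq_len 4096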
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp8_tp2_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp8_tp2_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8b0f968e3a83968de0a3bcd65eeda75e90aa799f --- /dev/null +++ b/scripts/run_469G_dp8_tp2_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp8_tp2_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_469G_dp8_tp2_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp8_tp2_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
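+ # A quick consistency check one could add for this layout (assumption: with
+ # nanotron's 3D parallelism the product dp*tp*pp must equal the rank count):
+ # EXPECTED_WORLD_SIZE=$((8 * 2 * 8))   # dp8 * tp2 * pp8 = 128 = 16 nodes * 8 GPUs
+ # if [ "$EXPECTED_WORLD_SIZE" -ne "$WORLD_SIZE" ]; then
+ #     echo "Parallelism layout does not match $WORLD_SIZE GPUs" >&2
+ # fi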
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp8_tp2_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_5.5G_dp1_tp16_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.sh b/scripts/run_5.5G_dp1_tp16_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.sh new file mode 100644 index 0000000000000000000000000000000000000000..be4cbebd48806b1d4762eea5ef7c2f1b55e89dfa --- /dev/null +++ b/scripts/run_5.5G_dp1_tp16_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_5.5G_dp1_tp16_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_5.5G_dp1_tp16_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml diff --git a/scripts/run_5.5G_dp4_tp64_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.sh b/scripts/run_5.5G_dp4_tp64_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.sh new file mode 100644 index 
0000000000000000000000000000000000000000..b3c5af275a436e30e2e5e2d9408ecd7758010571 --- /dev/null +++ b/scripts/run_5.5G_dp4_tp64_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_5.5G_dp4_tp64_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_5.5G_dp4_tp64_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml diff --git a/scripts/run_78.7G_dp4_tp4_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab49k_gqa8.sh b/scripts/run_78.7G_dp4_tp4_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab49k_gqa8.sh new file mode 100644 index 0000000000000000000000000000000000000000..44472bae5a25ed4e382dd45e52a1391bbf9d2a9f --- /dev/null +++ b/scripts/run_78.7G_dp4_tp4_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab49k_gqa8.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_78.7G_dp4_tp4_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab49k_gqa8 # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set 
default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
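+# For the SALLOC_JOBID/NNODES fallback handled at the top of this script, the
+# interactive workflow would look roughly like this (node count and job id are
+# illustrative placeholders, not values used in the original runs):
+# salloc --nodes=16 --partition=hopper-prod --qos=high --gres=gpu:8 --time=00:40:00
+# SALLOC_JOBID=<jobid_printed_by_salloc> NNODES=16 bash scripts/run_78.7G_dp4_tp4_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab49k_gqa8.sh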
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_78.7G_dp4_tp4_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab49k_gqa8" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_78.7G_dp4_tp4_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab49k_gqa8.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_78.7G_dp4_tp4_pp8_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab49k_gqa8.yaml +fi diff --git a/scripts/run_8.86G_dp1_tp4_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp1_tp4_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..371ce53f3780b9f1e31420389103c6f31d973ab5 --- /dev/null +++ b/scripts/run_8.86G_dp1_tp4_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_8.86G_dp1_tp4_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp1_tp4_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_8.86G_dp2_tp16_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh 
b/scripts/run_8.86G_dp2_tp16_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..89acf1f98c45ccf44852d86654b7d8d8483d9e01 --- /dev/null +++ b/scripts/run_8.86G_dp2_tp16_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp2_tp16_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp2_tp16_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp16_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
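+ # Besides the "kill $SRUN_PID" hint echoed below, cancelling the Slurm job
+ # itself also tears down every rank and frees the allocation (standard Slurm,
+ # noted here only as a convenience):
+ # scancel "$SLURM_JOB_ID"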
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp16_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a7d45204a93f178d5bdd3bbaa190d71da8052d51 --- /dev/null +++ b/scripts/run_8.86G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
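+ # These scripts hard-code MASTER_PORT=12356; if several jobs ever share a
+ # head node, one hedged option (not used in the original scripts) is to
+ # derive the rendezvous port from the job id instead:
+ # export MASTER_PORT=$((10000 + SLURM_JOB_ID % 20000))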
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp2_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp2_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..55e910b3b0774a2587f9974dd5a9f7db21620000 --- /dev/null +++ b/scripts/run_8.86G_dp2_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp2_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp2_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=00:10:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp8_pp1_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..27d136f7625c036a0b42aaa90ab209fd2a7971b0 --- /dev/null +++ b/scripts/run_8.86G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp8_pp1_acc2_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp32_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp32_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b3d2409c87a09f0557f84801779e763f173ec929 --- /dev/null +++ b/scripts/run_8.86G_dp32_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp32_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=normal + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 keeps it enabled
+# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 keeps it enabled
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp32_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp32_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp32_tp8_pp1_acc1_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp4_tp16_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp4_tp16_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4716329217fe8a64ccd20f59009397b13b501472 --- /dev/null +++ b/scripts/run_8.86G_dp4_tp16_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp4_tp16_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 keeps it enabled
+# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 keeps it enabled
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp4_tp16_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp4_tp16_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp4_tp16_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a272fa11ad1db36da97a6c1e4a643832045b6796 --- /dev/null +++ b/scripts/run_8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 keeps it enabled
+# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 keeps it enabled
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp4_tp8_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp4_tp8_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..0ef760c2c67e4acb623f4c288964ade439ed7065 --- /dev/null +++ b/scripts/run_8.86G_dp4_tp8_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp4_tp8_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 keeps it enabled
+# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 keeps it enabled
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp4_tp8_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp4_tp8_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp4_tp8_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp8_tp2_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp8_tp2_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..9640fcc3ac5945b21ca800dcf34d5075c6520152 --- /dev/null +++ b/scripts/run_8.86G_dp8_tp2_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp8_tp2_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 keeps it enabled
+# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 keeps it enabled
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp8_tp2_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp8_tp2_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp8_tp2_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp4_tp2_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp4_tp2_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..40c0f0071ddbcfe17a12380e12fcfa045d4e6d70 --- /dev/null +++ b/scripts/run_80G_dp4_tp2_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_80G_dp4_tp2_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 keeps it enabled
+# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 keeps it enabled
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_80G_dp4_tp2_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_80G_dp4_tp2_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp4_tp2_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp8_tp1_pp2_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp8_tp1_pp2_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f54de502a8f1b6d640df1c071d2a86979c81378d --- /dev/null +++ b/scripts/run_80G_dp8_tp1_pp2_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_80G_dp8_tp1_pp2_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp8_tp1_pp2_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_80G_dp8_tp8_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp8_tp8_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh new file 
mode 100644 index 0000000000000000000000000000000000000000..8d7576ef328a7c81012ced806f4edb1a3bbc6c8c --- /dev/null +++ b/scripts/run_80G_dp8_tp8_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_80G_dp8_tp8_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 keeps it enabled
+# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 keeps it enabled
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_80G_dp8_tp8_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_80G_dp8_tp8_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp8_tp8_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_dp128_tp1_pp1_acc4_mbs2_seq4096_zero0_l24_h2048_heads16.sh b/scripts/run_dp128_tp1_pp1_acc4_mbs2_seq4096_zero0_l24_h2048_heads16.sh new file mode 100644 index 0000000000000000000000000000000000000000..b53b4375d862402694445ab66601098d0f586f90 --- /dev/null +++ b/scripts/run_dp128_tp1_pp1_acc4_mbs2_seq4096_zero0_l24_h2048_heads16.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +#SBATCH --job-name=bench_dp128_tp1_pp1_acc4_mbs2_seq4096_zero0_l24_h2048_heads16 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%x-%j.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=50 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_dp128_tp1_pp1_acc4_mbs2_seq4096_zero0_l24_h2048_heads16.yaml