+ source /etc/profile.d/modules.sh
++ . /usr/share/modules/init/bash
+++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
[... Environment Modules init: defines the module() shell function and exports MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl, MODULESHOME=/usr/share/modules, MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles, ENV=/usr/share/modules/init/profile.sh, BASH_ENV=/usr/share/modules/init/bash, LOADEDMODULES= ...]
+ module load cuda/12.1
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
++ export CUDA_HOME=/usr/local/cuda-12.1
++ export CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
++ export LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
++ export LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
++ export MANPATH=/usr/local/cuda-12.1/share/man::
++ export MPI_PATH=/opt/amazon/openmpi
++ export NCCL_HOME=/opt/nccl/build
++ export NCCL_PROTO=simple
++ export NCCL_SOCKET_IFNAME=enp
++ export AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
++ export FI_PROVIDER=efa
++ export FI_EFA_FORK_SAFE=1
++ export FI_EFA_ENABLE_SHM_TRANSFER=1
[... /usr/local/cuda-12.1/efa/test-cuda-12.1, /usr/local/cuda-12.1/bin and /usr/local/cuda-12.1/include prepended to PATH; *_modshare bookkeeping variables exported ...]
+ source /fsx/nouamane/miniconda/bin/activate
++ conda activate
[... base environment activated: CONDA_PREFIX=/fsx/nouamane/miniconda, CONDA_SHLVL=3, CONDA_DEFAULT_ENV=base, CONDA_EXE=/fsx/nouamane/miniconda/bin/conda, CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python; libxml2 deactivate script unsets XML_CATALOG_FILES ...]
+ conda activate 2-1-cu121
[... env 2-1-cu121 activated: CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121, CONDA_SHLVL=4, CONDA_DEFAULT_ENV=2-1-cu121, CONDA_PROMPT_MODIFIER='(2-1-cu121) '; libxml2 activate script sets XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog ...]
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:[...]
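For readability: the xtrace that follows (node list, master address, world size, and the srun torchrun launch) is consistent with a Slurm job script along the lines sketched below. This is a reconstruction, not the actual script: the use of $SLURM_JOB_NODELIST, $SLURM_NNODES and $SLURM_JOB_ID is assumed, while the exported values and the torchrun flags are taken verbatim from the trace.

    #!/bin/bash
    # Hypothetical launcher fragment matching the trace below (variable derivations assumed).
    export NODELIST=$(scontrol show hostnames "$SLURM_JOB_NODELIST")                # ip-26-0-166-15 ip-26-0-166-36
    export MASTER_NODE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n1)  # ip-26-0-166-15
    export MASTER_PORT=12356
    export NNODES=$SLURM_NNODES                                                     # 2
    export GPUS_PER_NODE=8
    export WORLD_SIZE=$((NNODES * GPUS_PER_NODE))                                   # 16
    export CUDA_DEVICE_MAX_CONNECTIONS=1
    export NANOTRON_BENCHMARK=1
    echo "Master node: $MASTER_NODE"
    echo "All nodes: $NODELIST"
    echo "World size: $WORLD_SIZE"
    srun torchrun --nnodes=$NNODES --nproc_per_node=$GPUS_PER_NODE \
        --rdzv_id=$SLURM_JOB_ID --rdzv_backend=c10d \
        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
        run_train.py --config-file examples/config_tiny_llama_profile.yaml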
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:[...]
++ scontrol show hostnames 'ip-26-0-166-[15,36]'
+ export 'NODELIST=ip-26-0-166-15 ip-26-0-166-36'
++ scontrol show hostnames 'ip-26-0-166-[15,36]'
++ head -n1
+ export MASTER_NODE=ip-26-0-166-15
+ export MASTER_PORT=12356
+ export NNODES=2
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=16
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ export NANOTRON_BENCHMARK=1
+ echo 'Master node: ip-26-0-166-15'
Master node: ip-26-0-166-15
+ echo 'All nodes: ip-26-0-166-15 ip-26-0-166-36'
All nodes: ip-26-0-166-15 ip-26-0-166-36
+ echo 'World size: 16'
World size: 16
+ srun torchrun --nnodes=2 --nproc_per_node=8 --rdzv_id=13131580 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-166-15:12356 run_train.py --config-file examples/config_tiny_llama_profile.yaml
[2024-12-07 22:04:13,590] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-07 22:04:13,590] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-07 22:04:13,590] torch.distributed.run: [WARNING]
[2024-12-07 22:04:13,590] torch.distributed.run: [WARNING] *****************************************
[2024-12-07 22:04:13,590] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-07 22:04:13,590] torch.distributed.run: [WARNING] *****************************************
[2024-12-07 22:04:13,590] torch.distributed.run: [WARNING]
[2024-12-07 22:04:13,590] torch.distributed.run: [WARNING] *****************************************
[2024-12-07 22:04:13,590] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-07 22:04:13,590] torch.distributed.run: [WARNING] *****************************************
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: Config:
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: Config(general=GeneralArgs(project='debug', run='dp1_tp8_seq64k', seed=42, step=None, consumed_train_samples=None, benchmark_csv_path=None, ignore_sanity_checks=True),
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: parallelism=ParallelismArgs(dp=16, pp=1, tp=1, pp_engine=, tp_mode=, tp_linear_async_communication=True, recompute_layer=False, tp_recompute_allgather=True, expert_parallel_size=1),
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0, eos_token_id=0, hidden_act='silu', hidden_size=2048, initializer_range=0.02, intermediate_size=8192, is_llama_config=True, max_position_embeddings=65536, num_attention_heads=32, num_hidden_layers=24, num_key_value_heads=32, pad_token_id=None, pretraining_tp=1, rms_norm_eps=1e-05, rope_scaling=None, rope_theta=10000.0, rope_interleaved=False, tie_word_embeddings=True, use_cache=True, vocab_size=49152), init_method=RandomInit(std=0.02), dtype=torch.bfloat16, make_vocab_size_divisible_by=1, ddp_bucket_cap_mb=25),
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel', tokenizer_revision=None, tokenizer_max_length=None),
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'), checkpoint_interval=10000, save_initial_state=False, save_final_state=False, resume_checkpoint_path=None, checkpoints_path_is_shared_file_system=False),
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: logging=LoggingArgs(log_level='info', log_level_replica='info', iteration_step_info_interval=1),
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: tokens=TokensArgs(sequence_length=65536, train_steps=10, micro_batch_size=1, batch_accumulation_per_replica=1, val_check_interval=100, limit_val_batches=0, limit_test_batches=0),
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08, adam_beta1=0.9, adam_beta2=0.95, torch_adam_is_fused=True, name='adamW'), zero_stage=0, weight_decay=0.01, clip_grad=1.0, accumulate_grad_in_fp32=True, learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003, lr_warmup_steps=2, lr_warmup_style='linear', lr_decay_style='cosine', lr_decay_steps=13, lr_decay_starting_step=None, min_decay_lr=1e-05)),
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: data_stages=[DatasetStageArgs(name='Stable Training Stage', start_training_step=1, data=DataArgs(dataset=None, seed=42, num_loading_workers=1))],
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: profiler=None,
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: lighteval=None,
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: s3_upload=None)
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: Model Config:
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: LlamaConfig(bos_token_id=0, eos_token_id=0, hidden_act='silu', hidden_size=2048, initializer_range=0.02, intermediate_size=8192, is_llama_config=True, max_position_embeddings=65536, num_attention_heads=32, num_hidden_layers=24, num_key_value_heads=32, pad_token_id=None, pretraining_tp=1, rms_norm_eps=1e-05, rope_scaling=None, rope_theta=10000.0, rope_interleaved=False, tie_word_embeddings=True, use_cache=True, vocab_size=49152)
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: Building model..
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: Initialize RoPE Theta = 10000.0
12/07/2024 22:04:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: Setting PP block ranks...
12/07/2024 22:04:57 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: Total number of parameters: 1.71G (3264.19MiB)
12/07/2024 22:04:57 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: Local number of parameters: 1.71G (3264.19MiB)
12/07/2024 22:04:57 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: [After model building] Memory usage: 3264.22MiB. Peak allocated: 5504.00MiB Peak reserved: 22914.00MiB
12/07/2024 22:05:05 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: No checkpoint path provided.
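As a quick sanity check on the run shape logged above (dp=16, micro_batch_size=1, batch_accumulation_per_replica=1, sequence_length=65536), the effective batch size works out as below; the variable names are illustrative only.

    # Values taken from the Config dump above.
    DP=16; MBS=1; GRAD_ACCUM=1; SEQ_LEN=65536
    GLOBAL_BATCH_SIZE=$((DP * MBS * GRAD_ACCUM))       # 16 samples per optimizer step
    TOKENS_PER_STEP=$((GLOBAL_BATCH_SIZE * SEQ_LEN))   # 1048576 tokens (~1M) per step
    echo "global_batch_size=$GLOBAL_BATCH_SIZE tokens_per_step=$TOKENS_PER_STEP"

This agrees with the global_batch_size: 16 reported at [Start training] below.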
12/07/2024 22:05:05 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: Parametrizing model parameters using StandardParametrizator
12/07/2024 22:05:05 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: [Optimizer Building] Using LearningRateForSP as learning rate
12/07/2024 22:05:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: [Training Plan] Stage Stable Training Stage has 9 remaining training steps and has consumed 0 samples
12/07/2024 22:05:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: Using dummy data generator
12/07/2024 22:05:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: [Training Plan] There are 1 training stages
12/07/2024 22:05:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: [Stage Stable Training Stage] start from step 1
12/07/2024 22:05:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]:
12/07/2024 22:05:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-166-15]: [Start training] datetime: 2024-12-07 22:05:06.503598 | mbs: 1 | grad_accum: 1 | global_batch_size: 16 | sequence_length: 65536 | train_steps: 10 | start_iteration_step: 0 | consumed_train_samples: 0
[identical traceback raised on each of the 16 ranks, interleaved in the raw log; shown once here, and the excerpt cuts off before the exception message]
Traceback (most recent call last):
  File "/fsx/nouamane/projects/nanotron/run_train.py", line 237, in <module>
    trainer.train(dataloader)
  File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
    outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
  File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
    outputs = self.pipeline_engine.train_batch_iter(
  File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
    output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
  File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
    output = model(**micro_batch)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward
[...]
"/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward return forward_call(*args, **kwargs)return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward else self._run_ddp_forward(*inputs, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward else self._run_ddp_forward(*inputs, **kwargs) else self._run_ddp_forward(*inputs, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward else self._run_ddp_forward(*inputs, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward else self._run_ddp_forward(*inputs, **kwargs)else self._run_ddp_forward(*inputs, **kwargs)else self._run_ddp_forward(*inputs, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward else self._run_ddp_forward(*inputs, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward return self.module(*inputs, **kwargs) # type: 
ignore[index] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self.module(*inputs, **kwargs) # type: ignore[index] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self.module(*inputs, **kwargs) # type: ignore[index] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self.module(*inputs, **kwargs) # type: ignore[index] return self.module(*inputs, **kwargs) # type: ignore[index]return self.module(*inputs, **kwargs) # type: ignore[index] return self.module(*inputs, **kwargs) # type: ignore[index] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self.module(*inputs, **kwargs) # type: ignore[index] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs)return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward return forward_call(*args, **kwargs)return forward_call(*args, **kwargs)return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward return 
forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl sharded_logits = self.model(sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl else self._run_ddp_forward(*inputs, **kwargs)else self._run_ddp_forward(*inputs, **kwargs)else self._run_ddp_forward(*inputs, **kwargs)else self._run_ddp_forward(*inputs, **kwargs)else self._run_ddp_forward(*inputs, **kwargs)else self._run_ddp_forward(*inputs, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward sharded_logits = self.model( else self._run_ddp_forward(*inputs, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl sharded_logits = self.model( sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self.module(*inputs, **kwargs) # type: ignore[index] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self.module(*inputs, **kwargs) # type: ignore[index] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self.module(*inputs, **kwargs) # type: ignore[index]return self.module(*inputs, **kwargs) # type: ignore[index]return self.module(*inputs, **kwargs) # type: ignore[index] return self.module(*inputs, **kwargs) # type: ignore[index] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in 
_wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self.module(*inputs, **kwargs) # type: ignore[index] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return self._call_impl(*args, **kwargs)return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs)return self._call_impl(*args, 
**kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states hidden_encoder_states = encoder_block(**hidden_encoder_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_encoder_states = encoder_block(**hidden_encoder_states) File 
"/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_encoder_states = encoder_block(**hidden_encoder_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_encoder_states = encoder_block(**hidden_encoder_states) hidden_encoder_states = encoder_block(**hidden_encoder_states) hidden_encoder_states = encoder_block(**hidden_encoder_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_encoder_states = encoder_block(**hidden_encoder_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_encoder_states = encoder_block(**hidden_encoder_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl sharded_logits = self.model( File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File 
"/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) return forward_call(*args, **kwargs)return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File 
"/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states hidden_encoder_states = encoder_block(**hidden_encoder_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_encoder_states = encoder_block(**hidden_encoder_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states hidden_encoder_states = encoder_block(**hidden_encoder_states) return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states hidden_encoder_states = encoder_block(**hidden_encoder_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_encoder_states = encoder_block(**hidden_encoder_states)hidden_encoder_states = encoder_block(**hidden_encoder_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_encoder_states = encoder_block(**hidden_encoder_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl output = self.pp_block(**new_kwargs) output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl output = self.pp_block(**new_kwargs) File 
"/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl output = self.pp_block(**new_kwargs) output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) return self._call_impl(*args, **kwargs) return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return forward_call(*args, **kwargs)return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) return self._call_impl(*args, **kwargs) File 
"/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return forward_call(*args, **kwargs)return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl output = self.pp_block(**new_kwargs) output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward output = self.pp_block(**new_kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File 
"/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward File 
"/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File 
"/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return forward_call(*args, **kwargs)return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward return forward_call(*args, **kwargs) hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl merged_states = self.gate_up_proj(hidden_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File 
"/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl merged_states = self.gate_up_proj(hidden_states) return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl merged_states = self.gate_up_proj(hidden_states) return forward_call(*args, **kwargs)return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl merged_states = self.gate_up_proj(hidden_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward return forward_call(*args, **kwargs) merged_states = self.gate_up_proj(hidden_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl merged_states = self.gate_up_proj(hidden_states) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl merged_states = self.gate_up_proj(hidden_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File 
"/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl merged_states = self.gate_up_proj(hidden_states)return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward merged_states = self.gate_up_proj(hidden_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl merged_states = self.gate_up_proj(hidden_states) merged_states = self.gate_up_proj(hidden_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward merged_states = self.gate_up_proj(hidden_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl merged_states = self.gate_up_proj(hidden_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl merged_states = self.gate_up_proj(hidden_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl return column_linear( merged_states = self.gate_up_proj(hidden_states) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( File 
"/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return self._call_impl(*args, **kwargs) return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl return forward_call(*args, **kwargs)return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward return forward_call(*args, **kwargs)return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward return forward_call(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply 
return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather) return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return column_linear( return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply return column_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply return super().apply(*args, 
**kwargs) # type: ignore[misc] return super().apply(*args, **kwargs) # type: ignore[misc]return super().apply(*args, **kwargs) # type: ignore[misc]return super().apply(*args, **kwargs) # type: ignore[misc]return super().apply(*args, **kwargs) # type: ignore[misc] return super().apply(*args, **kwargs) # type: ignore[misc] return super().apply(*args, **kwargs) # type: ignore[misc] File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper return super().apply(*args, **kwargs) # type: ignore[misc] return super().apply(*args, **kwargs) # type: ignore[misc] File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper return super().apply(*args, **kwargs) # type: ignore[misc]return super().apply(*args, **kwargs) # type: ignore[misc] return super().apply(*args, **kwargs) # type: ignore[misc] return super().apply(*args, **kwargs) # type: ignore[misc] File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper return super().apply(*args, **kwargs) # type: ignore[misc] File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper return super().apply(*args, **kwargs) # type: ignore[misc] File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper return func(*args, **kwargs) return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward File 
"/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return func(*args, **kwargs) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward return F.linear(gathered_tensor, weight, bias) torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 3 has a total capacty of 79.33 GiB of which 265.94 MiB is free. Including non-PyTorch memory, this process has 79.06 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF return F.linear(gathered_tensor, weight, bias) torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 4 has a total capacty of 79.33 GiB of which 421.94 MiB is free. Including non-PyTorch memory, this process has 78.91 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF return F.linear(gathered_tensor, weight, bias) torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 5 has a total capacty of 79.33 GiB of which 445.94 MiB is free. Including non-PyTorch memory, this process has 78.88 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF return F.linear(gathered_tensor, weight, bias) return F.linear(gathered_tensor, weight, bias) torch.cuda. torch.cudaOutOfMemoryErrorreturn F.linear(gathered_tensor, weight, bias): . OutOfMemoryErrorCUDA out of memory. Tried to allocate 2.00 GiB. GPU 3 has a total capacty of 79.33 GiB of which 265.94 MiB is free. Including non-PyTorch memory, this process has 79.06 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF : return F.linear(gathered_tensor, weight, bias)CUDA out of memory. Tried to allocate 2.00 GiB. GPU 4 has a total capacty of 79.33 GiB of which 421.94 MiB is free. Including non-PyTorch memory, this process has 78.91 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF torch.cudareturn F.linear(gathered_tensor, weight, bias). OutOfMemoryErrortorch.cuda.: OutOfMemoryErrorCUDA out of memory. Tried to allocate 2.00 GiB. GPU 6 has a total capacty of 79.33 GiB of which 445.94 MiB is free. Including non-PyTorch memory, this process has 78.88 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF: torch.cuda CUDA out of memory. Tried to allocate 2.00 GiB. GPU 7 has a total capacty of 79.33 GiB of which 625.94 MiB is free. Including non-PyTorch memory, this process has 78.71 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF. OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 1 has a total capacty of 79.33 GiB of which 445.94 MiB is free. Including non-PyTorch memory, this process has 78.88 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF return F.linear(gathered_tensor, weight, bias) return F.linear(gathered_tensor, weight, bias) return F.linear(gathered_tensor, weight, bias) torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 2 has a total capacty of 79.33 GiB of which 445.94 MiB is free. Including non-PyTorch memory, this process has 78.88 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF torch.cuda. OutOfMemoryErrorreturn F.linear(gathered_tensor, weight, bias): CUDA out of memory. Tried to allocate 2.00 GiB. GPU 7 has a total capacty of 79.33 GiB of which 625.94 MiB is free. Including non-PyTorch memory, this process has 78.71 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONFtorch.cuda .return F.linear(gathered_tensor, weight, bias)OutOfMemoryError torch.cuda : .return F.linear(gathered_tensor, weight, bias)CUDA out of memory. Tried to allocate 2.00 GiB. GPU 5 has a total capacty of 79.33 GiB of which 445.94 MiB is free. Including non-PyTorch memory, this process has 78.88 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONFOutOfMemoryError torch.cuda : .CUDA out of memory. 
Tried to allocate 2.00 GiB. GPU 0 has a total capacty of 79.33 GiB of which 469.94 MiB is free. Including non-PyTorch memory, this process has 78.86 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONFtorch.cuda OutOfMemoryError.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 1 has a total capacty of 79.33 GiB of which 445.94 MiB is free. Including non-PyTorch memory, this process has 78.88 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 2 has a total capacty of 79.33 GiB of which 445.94 MiB is free. Including non-PyTorch memory, this process has 78.88 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF return F.linear(gathered_tensor, weight, bias) torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 6 has a total capacty of 79.33 GiB of which 445.94 MiB is free. Including non-PyTorch memory, this process has 78.88 GiB memory in use. Of the allocated memory 67.54 GiB is allocated by PyTorch, and 549.68 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. 
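The allocator hint above can be tried before the next attempt by setting PYTORCH_CUDA_ALLOC_CONF for the job. A minimal sketch, assuming it is applied at the very top of run_train.py before CUDA is initialized; the 512 MiB split threshold is an illustrative value, not one taken from this run:

    import os

    # Must be set before the first CUDA allocation (ideally before importing torch).
    # max_split_size_mb caps the block size the caching allocator is willing to split,
    # which can reduce fragmentation when a large allocation (here 2.00 GiB) fails
    # even though some memory is reserved but unallocated.
    os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:512")

    import torch  # noqa: E402  (imported after the env var on purpose)

The same variable can instead be exported in the sbatch/srun launch script; either way it only takes effect if it is set before the process first touches CUDA.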
[2024-12-07 22:05:14,882] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1197853 closing signal SIGTERM
[2024-12-07 22:05:14,882] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1197854 closing signal SIGTERM
[2024-12-07 22:05:14,882] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1197856 closing signal SIGTERM
[2024-12-07 22:05:14,882] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1197857 closing signal SIGTERM
[2024-12-07 22:05:14,884] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1434351 closing signal SIGTERM
[2024-12-07 22:05:15,710] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 2 (pid: 1197855) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
Traceback (most recent call last):
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
    sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
    return f(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
    run(args)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
    elastic_launch(
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
run_train.py FAILED
------------------------------------------------------------
Failures on ip-26-0-166-36.ec2.internal (all at 2024-12-07_22:05:14, exitcode 1, error_file <N/A>; to enable tracebacks see: https://pytorch.org/docs/stable/elastic/errors.html):
  [1] rank 13 (local_rank: 5), pid 1197858
  [2] rank 14 (local_rank: 6), pid 1197859
  [3] rank 15 (local_rank: 7), pid 1197860
------------------------------------------------------------
Root Cause (first observed failure):
  [0] rank 10 (local_rank: 2), pid 1197855
============================================================
srun: error: ip-26-0-166-36: task 1: Exited with exit code 1
[2024-12-07 22:05:17,803] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 1 (pid: 1434352) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
Traceback (most recent call last):
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
    sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
  (same torchrun / elastic_launch / launch_agent frames as above)
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
run_train.py FAILED
------------------------------------------------------------
Failures on ip-26-0-166-15.ec2.internal (all at 2024-12-07_22:05:14, exitcode 1, error_file <N/A>; to enable tracebacks see: https://pytorch.org/docs/stable/elastic/errors.html):
  [1] rank 2 (local_rank: 2), pid 1434353
  [2] rank 3 (local_rank: 3), pid 1434354
  [3] rank 4 (local_rank: 4), pid 1434355
  [4] rank 5 (local_rank: 5), pid 1434356
  [5] rank 6 (local_rank: 6), pid 1434357
  [6] rank 7 (local_rank: 7), pid 1434358
------------------------------------------------------------
Root Cause (first observed failure):
  [0] rank 1 (local_rank: 1), pid 1434352
============================================================
srun: error: ip-26-0-166-15: task 0: Exited with exit code 1
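The empty error_file fields and the "To enable traceback" links in the failure tables refer to torchrun's error-propagation mechanism. A minimal sketch of that mechanism, assuming run_train.py exposes a main() entrypoint that can be wrapped (the actual nanotron entrypoint may be structured differently):

    from torch.distributed.elastic.multiprocessing.errors import record

    @record  # on failure, records this worker's traceback in the error file torchrun reports
    def main():
        ...  # existing training setup and loop

    if __name__ == "__main__":
        main()

With the decorator in place, the error_file and traceback fields of the FAILED summary should point at the actual per-rank exception (here the CUDA OOM) instead of the generic documentation link.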