Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +3 -0
- logs/12179039-bench_138M_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l12_h768_heads12.out +602 -0
- logs/12647824-bench_3.27G_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_tpmodeALL_l28_h3072_heads24.out +0 -0
- logs/13212437-bench_1.14G_dp8_tp1_pp1_acc1_mbs64_seq2048_zero1_tpmodeRED_vocab32k.out +1053 -0
- logs/13212476-bench_1.34G_dp8_tp1_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab131k.out +1065 -0
- logs/13264457-bench_1.34G_dp16_tp1_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13264673-bench_1.14G_dp4_tp2_pp1_acc128_mbs1_seq8192_zero1_tpmodeALL_vocab32k.out +611 -0
- logs/13264905-bench_1.14G_dp8_tp2_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab32k.out +0 -0
- logs/13265355-bench_1.34G_dp2_tp4_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab131k.out +835 -0
- logs/13265627-bench_1.34G_dp4_tp4_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab131k.out +0 -0
- logs/13265710-bench_1.34G_dp8_tp4_pp1_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab131k.out +740 -0
- logs/13265791-bench_1.34G_dp16_tp4_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k.out +917 -0
- logs/13265922-bench_1.34G_dp32_tp4_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13266540-bench_1.14G_dp2_tp8_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab32k.out +0 -0
- logs/13385099-bench_1.14G_dp2_tp8_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab32k.out +524 -0
- logs/13401577-bench_stress_test.out +518 -0
- logs/13401875-bench_stress_test.out +518 -0
- logs/13401984-bench_stress_test.out +518 -0
- logs/13417428-bench_stress_test.out +0 -0
- logs/13417834-bench_stress_test.out +0 -0
- logs/13417884-bench_stress_test.out +0 -0
- logs/13418012-bench_stress_test.out +0 -0
- logs/13438545-bench_3.57G_dp64_tp1_pp1_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13438869-bench_3.57G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.out +0 -0
- logs/13442395-bench_3.57G_dp1_tp4_pp16_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.out +0 -0
- logs/13443093-bench_80G_dp1_tp4_pp2_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.out +805 -0
- logs/13443135-bench_469G_dp1_tp8_pp4_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.out +0 -0
- logs/13443142-bench_80G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.out +1042 -0
- logs/13443224-bench_469G_dp1_tp4_pp4_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.out +1008 -0
- logs/13458865-bench_1.34G_dp32_tp1_pp2_acc1_mbs8_seq4096_zero0_tpmodeRED_vocab131k.out +0 -0
- logs/13458918-bench_3.57G_dp32_tp2_pp2_acc1_mbs8_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13460158-bench_3.57G_dp4_tp4_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out +923 -0
- logs/13490790-2024-12-18-10-40-58-bench_8.86G_dp16_tp16_pp1_acc4_mbs4_seq4096_zero0_tpmodeRED_vocab131k.out +22 -0
- logs/13490809-2024-12-18-11-24-42-bench_8.86G_dp2_tp8_pp1_acc4_mbs32_seq4096_zero0_tpmodeRED_vocab131k.out +4 -0
- logs/13490809-2024-12-18-11-33-26-bench_8.86G_dp2_tp8_pp1_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.out +849 -0
- logs/13490826-2024-12-18-12-27-58-bench_1.34G_dp16_tp16_pp1_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13490827-2024-12-18-12-26-05-bench_1.34G_dp2_tp8_pp1_acc8_mbs16_seq4096_zero0_tpmodeRED_vocab131k.out +309 -0
- logs/13505397-bench_1.34G_dp4_tp4_pp1_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13512539-bench_1.34G_dp32_tp16_pp1_acc4_mbs2_seq4096_zero0_tpmodeRED_vocab131k.out +0 -0
- logs/13522775-bench_3.57G_dp16_tp4_pp1_acc16_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13768581-bench_1.34G_dp2_tp2_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13803107-bench_3.57G_dp2_tp1_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13803282-bench_8.86G_dp2_tp4_pp2_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13803376-bench_8.86G_dp8_tp1_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13803468-bench_1.34G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13803707-bench_3.57G_dp2_tp4_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13803730-bench_3.57G_dp2_tp8_pp4_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13803789-bench_469G_dp2_tp16_pp2_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13848051-bench_1.34G_dp2_tp1_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
- logs/13848114-bench_80G_dp2_tp2_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.out +0 -0
.gitattributes
CHANGED
@@ -89,3 +89,6 @@ logs/14099274-bench_3.57G_dp2_tp16_pp8_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab
 logs/13849407-bench_1.34G_dp2_tp16_pp8_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out filter=lfs diff=lfs merge=lfs -text
 logs/13849451-bench_80G_dp2_tp32_pp4_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.out filter=lfs diff=lfs merge=lfs -text
 logs/14099247-bench_3.57G_dp16_tp4_pp4_acc16_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out filter=lfs diff=lfs merge=lfs -text
+logs/13849412-bench_3.57G_dp2_tp16_pp8_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out filter=lfs diff=lfs merge=lfs -text
+logs/14099199-bench_1.34G_dp8_tp16_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.out filter=lfs diff=lfs merge=lfs -text
+logs/13849552-bench_8.86G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k.out filter=lfs diff=lfs merge=lfs -text
logs/12179039-bench_138M_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l12_h768_heads12.out
ADDED
@@ -0,0 +1,602 @@
+ source /etc/profile.d/modules.sh
++ . /usr/share/modules/init/bash
+++ unset _mlshdbg
+++ '[' 0 = 1 ']'
+++ unset _mlre _mlIFS
+++ '[' -n x ']'
+++ _mlIFS='
'
+++ IFS=' '
+++ '[' -n '' ']'
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
+++ _mlcode='module() {
unset _mlshdbg;
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
case "$-" in
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
*v*) set +v; _mlshdbg='\''v'\'' ;;
*x*) set +x; _mlshdbg='\''x'\'' ;;
*) _mlshdbg='\'''\'' ;;
esac;
fi;
unset _mlre _mlIFS;
if [ -n "${IFS+x}" ]; then
_mlIFS=$IFS;
fi;
IFS='\'' '\'';
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
fi;
_mlrv="MODULES_RUNENV_${_mlv}";
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
fi;
done;
if [ -n "${_mlre:-}" ]; then
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
else
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
fi;
_mlstatus=$?;
if [ -n "${_mlIFS+x}" ]; then
IFS=$_mlIFS;
else
unset IFS;
fi;
unset _mlre _mlv _mlrv _mlIFS;
if [ -n "${_mlshdbg:-}" ]; then
set -$_mlshdbg;
fi;
unset _mlshdbg;
return $_mlstatus;
};
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
ENV=/usr/share/modules/init/profile.sh; export ENV;
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
MODULESHOME=/usr/share/modules; export MODULESHOME;
LOADEDMODULES=; export LOADEDMODULES;
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
test 0;'
+++ _mlret=0
+++ '[' -n x ']'
+++ IFS='
'
+++ unset _mlIFS
+++ unset _mlre _mlv _mlrv
+++ '[' 0 -eq 0 ']'
+++ eval 'module() {
unset _mlshdbg;
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
case "$-" in
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
*v*) set +v; _mlshdbg='\''v'\'' ;;
*x*) set +x; _mlshdbg='\''x'\'' ;;
*) _mlshdbg='\'''\'' ;;
esac;
fi;
unset _mlre _mlIFS;
if [ -n "${IFS+x}" ]; then
_mlIFS=$IFS;
fi;
IFS='\'' '\'';
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
fi;
_mlrv="MODULES_RUNENV_${_mlv}";
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
fi;
done;
if [ -n "${_mlre:-}" ]; then
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
else
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
fi;
_mlstatus=$?;
if [ -n "${_mlIFS+x}" ]; then
IFS=$_mlIFS;
else
unset IFS;
fi;
unset _mlre _mlv _mlrv _mlIFS;
if [ -n "${_mlshdbg:-}" ]; then
set -$_mlshdbg;
fi;
unset _mlshdbg;
return $_mlstatus;
};
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
ENV=/usr/share/modules/init/profile.sh; export ENV;
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
MODULESHOME=/usr/share/modules; export MODULESHOME;
LOADEDMODULES=; export LOADEDMODULES;
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
test 0;'
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
++++ export MODULES_CMD
++++ ENV=/usr/share/modules/init/profile.sh
++++ export ENV
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
++++ export MODULEPATH_modshare
++++ BASH_ENV=/usr/share/modules/init/bash
++++ export BASH_ENV
++++ MODULESHOME=/usr/share/modules
++++ export MODULESHOME
++++ LOADEDMODULES=
++++ export LOADEDMODULES
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
++++ export MODULEPATH
++++ test 0
+++ '[' 0 = 1 ']'
+++ '[' -t 2 ']'
+++ export -f module
+++ export -f switchml
+++ '[' 5 -ge 3 ']'
+++ [[ ehxB =~ i ]]
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
+++ '[' '!' -n '' ']'
+++ MANPATH=:
+++ export MANPATH
++++ manpath
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
+++ unset _mlcode _mlret
+++ '[' -n '' ']'
+ module load cuda/12.1
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
++ scontrol show hostnames ip-26-0-160-225
+ export NODELIST=ip-26-0-160-225
+ NODELIST=ip-26-0-160-225
++ scontrol show hostnames ip-26-0-160-225
++ head -n1
+ export MASTER_NODE=ip-26-0-160-225
+ MASTER_NODE=ip-26-0-160-225
+ export MASTER_PORT=12356
+ MASTER_PORT=12356
+ export NNODES=1
+ NNODES=1
+ export GPUS_PER_NODE=8
+ GPUS_PER_NODE=8
+ export WORLD_SIZE=8
+ WORLD_SIZE=8
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ CUDA_DEVICE_MAX_CONNECTIONS=1
+ export NANOTRON_BENCHMARK=1
+ NANOTRON_BENCHMARK=1
+ echo 'Master node: ip-26-0-160-225'
Master node: ip-26-0-160-225
+ echo 'All nodes: ip-26-0-160-225'
All nodes: ip-26-0-160-225
+ echo 'World size: 8'
World size: 8
+ srun torchrun --nnodes=1 --nproc_per_node=8 --rdzv_id=12179039 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-160-225:12356 run_train.py --config-file benchmark/configs/config_138M_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l12_h768_heads12.yaml
[2024-12-02 11:35:42,124] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-02 11:35:42,124] torch.distributed.run: [WARNING]
[2024-12-02 11:35:42,124] torch.distributed.run: [WARNING] *****************************************
[2024-12-02 11:35:42,124] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-02 11:35:42,124] torch.distributed.run: [WARNING] *****************************************
430 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Config:
|
431 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Config(general=GeneralArgs(project='debug',
|
432 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: run='138M_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l12_h768_heads12',
|
433 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: seed=42,
|
434 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: step=None,
|
435 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: consumed_train_samples=None,
|
436 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: benchmark_csv_path=PosixPath('bench.csv'),
|
437 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: ignore_sanity_checks=True),
|
438 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: parallelism=ParallelismArgs(dp=8,
|
439 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pp=1,
|
440 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp=1,
|
441 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f9321e58a30>,
|
442 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
|
443 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp_linear_async_communication=True,
|
444 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: recompute_layer=False,
|
445 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp_recompute_allgather=True,
|
446 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: expert_parallel_size=1),
|
447 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
|
448 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: eos_token_id=0,
|
449 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_act='silu',
|
450 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_size=768,
|
451 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: initializer_range=0.02,
|
452 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: intermediate_size=3072,
|
453 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: is_llama_config=True,
|
454 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: max_position_embeddings=4096,
|
455 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_attention_heads=12,
|
456 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_hidden_layers=12,
|
457 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_key_value_heads=12,
|
458 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pad_token_id=None,
|
459 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pretraining_tp=1,
|
460 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rms_norm_eps=1e-05,
|
461 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_scaling=None,
|
462 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_theta=10000.0,
|
463 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_interleaved=False,
|
464 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tie_word_embeddings=True,
|
465 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: use_cache=True,
|
466 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: vocab_size=32768),
|
467 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: init_method=RandomInit(std=0.02),
|
468 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: dtype=torch.bfloat16,
|
469 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: make_vocab_size_divisible_by=1,
|
470 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: ddp_bucket_cap_mb=25),
|
471 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
|
472 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer_revision=None,
|
473 |
+
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer_max_length=None),
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoint_interval=10000,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: save_initial_state=False,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: save_final_state=False,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: resume_checkpoint_path=None,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoints_path_is_shared_file_system=False),
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: logging=LoggingArgs(log_level='info',
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: log_level_replica='info',
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration_step_info_interval=1),
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokens=TokensArgs(sequence_length=4096,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: train_steps=100,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: micro_batch_size=1,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: batch_accumulation_per_replica=1,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: val_check_interval=100,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: limit_val_batches=0,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: limit_test_batches=0),
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: adam_beta1=0.9,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: adam_beta2=0.95,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: torch_adam_is_fused=True,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: name='adamW'),
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: zero_stage=0,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: weight_decay=0.01,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: clip_grad=1.0,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: accumulate_grad_in_fp32=True,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_warmup_steps=2,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_warmup_style='linear',
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_style='cosine',
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_steps=13,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_starting_step=None,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: min_decay_lr=1e-05)),
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: start_training_step=1,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: data=DataArgs(dataset=None,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: seed=42,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_loading_workers=1))],
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: profiler=None,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lighteval=None,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: s3_upload=None)
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Model Config:
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: LlamaConfig(bos_token_id=0,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: eos_token_id=0,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_act='silu',
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_size=768,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: initializer_range=0.02,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: intermediate_size=3072,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: is_llama_config=True,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: max_position_embeddings=4096,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_attention_heads=12,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_hidden_layers=12,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_key_value_heads=12,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pad_token_id=None,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pretraining_tp=1,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rms_norm_eps=1e-05,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_scaling=None,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_theta=10000.0,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_interleaved=False,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tie_word_embeddings=True,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: use_cache=True,
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: vocab_size=32768)
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Building model..
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Initialize RoPE Theta = 10000.0
12/02/2024 11:36:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Setting PP block ranks...
12/02/2024 11:36:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Total number of parameters: 138M (264.04MiB)
12/02/2024 11:36:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Local number of parameters: 138M (264.04MiB)
12/02/2024 11:36:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 265.06MiB. Peak allocated: 5632.00MiB Peak reserved: 17922.00MiB
12/02/2024 11:36:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: No checkpoint path provided.
12/02/2024 11:36:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Parametrizing model parameters using StandardParametrizator
12/02/2024 11:36:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Optimizer Building] Using LearningRateForSP as learning rate
12/02/2024 11:36:22 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
12/02/2024 11:36:22 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Using dummy data generator
12/02/2024 11:36:22 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Training Plan] There are 1 training stages
12/02/2024 11:36:22 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Stage Stable Training Stage] start from step 1
12/02/2024 11:36:22 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]:
12/02/2024 11:36:22 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Start training] datetime: 2024-12-02 11:36:22.186191 | mbs: 1 | grad_accum: 1 | global_batch_size: 8 | sequence_length: 4096 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
wandb: Tracking run with wandb version 0.16.0
wandb: W&B syncing is set to `offline` in this directory.
wandb: Run `wandb online` or set WANDB_MODE=online to enable cloud syncing.
12/02/2024 11:36:30 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Resuming training from stage Stable Training Stage, it has trained for 0 samples and has 99 remaining train steps
12/02/2024 11:36:30 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 1585.24MiB. Peak allocated 5632.00MiB. Peak reserved: 17922.00MiB
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 1655.31MiB. Peak allocated 4534.11MiB. Peak reserved: 4814.00MiB
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration: 1 / 100 | consumed_tokens: 32.8K | elapsed_time_per_iteration_ms: 1.99K | tokens_per_sec: 16.4K | tokens_per_sec_per_gpu: 2.05K | global_batch_size: 8 | lm_loss: 10.6 | lr: 0.00015 | model_tflops_per_gpu: 2.64 | hardware_tflops_per_gpu: 2.64 | grad_norm: 2.42 | cuda_memory_allocated: 2.85G | cuda_max_memory_reserved: 5.05G | hd_total_memory_tb: 312G | hd_used_memory_tb: 69.4G | hd_free_memory_tb: 243G
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2721.24MiB. Peak allocated 2721.25MiB. Peak reserved: 4814.00MiB
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2721.99MiB. Peak allocated 5632.97MiB. Peak reserved: 6094.00MiB
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration: 2 / 100 | consumed_tokens: 65.5K | elapsed_time_per_iteration_ms: 74.3 | tokens_per_sec: 441K | tokens_per_sec_per_gpu: 55.2K | global_batch_size: 8 | lm_loss: 10.6 | lr: 0.0003 | model_tflops_per_gpu: 70.8 | hardware_tflops_per_gpu: 70.8 | grad_norm: 2.41 | cuda_memory_allocated: 2.85G | cuda_max_memory_reserved: 6.39G | hd_total_memory_tb: 312G | hd_used_memory_tb: 69.4G | hd_free_memory_tb: 243G
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2721.99MiB. Peak allocated 2722.03MiB. Peak reserved: 6094.00MiB
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2721.99MiB. Peak allocated 5632.97MiB. Peak reserved: 6094.00MiB
num_paramsnum_paramsnum_paramsnum_paramsnum_params
num_params
{'total': 138431232, 'local': 138431232}{'total': 138431232, 'local': 138431232}
{'total': 138431232, 'local': 138431232}
{'total': 138431232, 'local': 138431232}
{'total': 138431232, 'local': 138431232}
{'total': 138431232, 'local': 138431232}
num_params
{'total': 138431232, 'local': 138431232}
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration: 3 / 100 | consumed_tokens: 98.3K | elapsed_time_per_iteration_ms: 60.3 | tokens_per_sec: 544K | tokens_per_sec_per_gpu: 68K | global_batch_size: 8 | lm_loss: 10.5 | lr: 0.000296 | model_tflops_per_gpu: 87.2 | hardware_tflops_per_gpu: 87.2 | grad_norm: 2.34 | cuda_memory_allocated: 2.85G | cuda_max_memory_reserved: 6.39G | hd_total_memory_tb: 312G | hd_used_memory_tb: 69.4G | hd_free_memory_tb: 243G
num_params
{'total': 138431232, 'local': 138431232}
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: | job_id | name | nodes | seq_len | mbs | batch_accum | gbs | mTFLOPs | hTFLOPs | tok/s/gpu | AllReduce (GB/s) | AllGather (GB/s) | ReduceScatter (GB/s) | AR Intra-node (GB/s) | AG Intra-node (GB/s) | RS Intra-node (GB/s) | Mem Alloc (GB) | Mem Res (GB) | dp | pp | tp | pp_engine | tp_mode | tp_async_comm | hidden_size | hidden_act | num_layers | num_heads | num_kv_heads | max_pos | vocab_size | tie_word_embeddings | dtype | zero_stage | ddp_bucket_cap_mb | accumulate_grad_in_fp32 | Total Params | Local Params |
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: | -------- | --------------------------------------------------------- | ----- | ------- | --- | ----------- | --- | ------- | ------- | --------- | ---------------- | ---------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------- | ------------ | -- | -- | -- | --------------------------------------------------------------------------------------------------------- | --------------------------------------- | ------------- | ----------- | ---------- | ---------- | --------- | ------------ | ------- | ---------- | ------------------- | -------------- | ---------- | ----------------- | ----------------------- | ------------ | ------------ |
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: | 12179039 | 138M_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l12_h768_heads12 | 1 | 4096 | 1 | 1 | 8 | 87.21 | 87.21 | 67950.28 | 460.64 | 262.78 | 263.10 | 459.46 | 264.58 | 262.70 | 2.66 | 5.95 | 8 | 1 | 1 | <nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f9321e58a30> | TensorParallelLinearMode.REDUCE_SCATTER | True | 768 | silu | 12 | 12 | 12 | 4096 | 32768 | True | torch.bfloat16 | 0 | 25 | True | 138M | 138M |
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Throughput logging complete
12/02/2024 11:36:32 [INFO|DP=4|PP=0|TP=0|ip-26-0-160-225]: Throughput logging complete
12/02/2024 11:36:32 [INFO|DP=7|PP=0|TP=0|ip-26-0-160-225]: Throughput logging complete
12/02/2024 11:36:32 [INFO|DP=1|PP=0|TP=0|ip-26-0-160-225]: Throughput logging complete
12/02/2024 11:36:32 [INFO|DP=3|PP=0|TP=0|ip-26-0-160-225]: Throughput logging complete
12/02/2024 11:36:32 [INFO|DP=2|PP=0|TP=0|ip-26-0-160-225]: Throughput logging complete
12/02/2024 11:36:32 [INFO|DP=6|PP=0|TP=0|ip-26-0-160-225]: Throughput logging complete
12/02/2024 11:36:32 [INFO|DP=5|PP=0|TP=0|ip-26-0-160-225]: Throughput logging complete
srun: Job step aborted: Waiting up to 32 seconds for job step to finish.
slurmstepd: error: *** JOB 12179039 ON ip-26-0-160-225 CANCELLED AT 2024-12-02T11:36:32 ***
12/02/2024 11:36:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2721.99MiB. Peak allocated 2722.03MiB. Peak reserved: 6094.00MiB
slurmstepd: error: *** STEP 12179039.0 ON ip-26-0-160-225 CANCELLED AT 2024-12-02T11:36:32 ***
[2024-12-02 11:36:32,967] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
[2024-12-02 11:36:32,968] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2269158 closing signal SIGTERM
[2024-12-02 11:36:32,968] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2269159 closing signal SIGTERM
[2024-12-02 11:36:32,968] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2269160 closing signal SIGTERM
[2024-12-02 11:36:32,968] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2269161 closing signal SIGTERM
[2024-12-02 11:36:32,969] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2269162 closing signal SIGTERM
[2024-12-02 11:36:32,969] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2269163 closing signal SIGTERM
[2024-12-02 11:36:32,970] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2269164 closing signal SIGTERM
[2024-12-02 11:36:32,971] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2269165 closing signal SIGTERM
logs/12647824-bench_3.27G_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_tpmodeALL_l28_h3072_heads24.out
ADDED
The diff for this file is too large to render.
See raw diff
logs/13212437-bench_1.14G_dp8_tp1_pp1_acc1_mbs64_seq2048_zero1_tpmodeRED_vocab32k.out
ADDED
@@ -0,0 +1,1053 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames ip-26-0-173-121
|
400 |
+
+ export NODELIST=ip-26-0-173-121
|
401 |
+
+ NODELIST=ip-26-0-173-121
|
402 |
+
++ scontrol show hostnames ip-26-0-173-121
|
403 |
+
++ head -n1
|
404 |
+
+ export MASTER_NODE=ip-26-0-173-121
|
405 |
+
+ MASTER_NODE=ip-26-0-173-121
|
406 |
+
+ export MASTER_PORT=12356
|
407 |
+
+ MASTER_PORT=12356
|
408 |
+
+ export NNODES=1
|
409 |
+
+ NNODES=1
|
410 |
+
+ export GPUS_PER_NODE=8
|
411 |
+
+ GPUS_PER_NODE=8
|
412 |
+
+ export WORLD_SIZE=8
|
413 |
+
+ WORLD_SIZE=8
|
414 |
+
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
|
415 |
+
+ CUDA_DEVICE_MAX_CONNECTIONS=1
|
416 |
+
+ export NANOTRON_BENCHMARK=1
|
417 |
+
+ NANOTRON_BENCHMARK=1
|
418 |
+
+ echo 'Master node: ip-26-0-173-121'
|
419 |
+
Master node: ip-26-0-173-121
|
420 |
+
+ echo 'All nodes: ip-26-0-173-121'
|
421 |
+
All nodes: ip-26-0-173-121
|
422 |
+
+ echo 'World size: 8'
|
423 |
+
World size: 8
|
424 |
+
+ srun torchrun --nnodes=1 --nproc_per_node=8 --rdzv_id=13212437 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-173-121:12356 run_train.py --config-file benchmark/configs/config_1.14G_dp8_tp1_pp1_acc1_mbs64_seq2048_zero1_tpmodeRED_vocab32k.yaml
|
425 |
+
[2024-12-09 19:08:27,038] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
426 |
+
[2024-12-09 19:08:27,038] torch.distributed.run: [WARNING]
|
427 |
+
[2024-12-09 19:08:27,038] torch.distributed.run: [WARNING] *****************************************
|
428 |
+
[2024-12-09 19:08:27,038] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
429 |
+
[2024-12-09 19:08:27,038] torch.distributed.run: [WARNING] *****************************************
|
430 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: Config:
|
431 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: Config(general=GeneralArgs(project='debug',
|
432 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: run='1.14G_dp8_tp1_pp1_acc1_mbs64_seq2048_zero1_tpmodeRED_vocab32k',
|
433 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: seed=42,
|
434 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: step=None,
|
435 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: consumed_train_samples=None,
|
436 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: benchmark_csv_path=PosixPath('benchmark/results/bench_final.csv'),
|
437 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: ignore_sanity_checks=True),
|
438 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: parallelism=ParallelismArgs(dp=8,
|
439 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: pp=1,
|
440 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: tp=1,
|
441 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f3b75c6caf0>,
|
442 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
|
443 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: tp_linear_async_communication=True,
|
444 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: recompute_layer=False,
|
445 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: tp_recompute_allgather=True,
|
446 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: expert_parallel_size=1),
|
447 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
|
448 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: eos_token_id=0,
|
449 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: hidden_act='silu',
|
450 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: hidden_size=2048,
|
451 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: initializer_range=0.02,
|
452 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: intermediate_size=8192,
|
453 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: is_llama_config=True,
|
454 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: max_position_embeddings=2048,
|
455 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: num_attention_heads=32,
|
456 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: num_hidden_layers=16,
|
457 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: num_key_value_heads=8,
|
458 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: pad_token_id=None,
|
459 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: pretraining_tp=1,
|
460 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: rms_norm_eps=1e-05,
|
461 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: rope_scaling=None,
|
462 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: rope_theta=10000.0,
|
463 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: rope_interleaved=False,
|
464 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: tie_word_embeddings=True,
|
465 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: use_cache=True,
|
466 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: vocab_size=32768),
|
467 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: init_method=RandomInit(std=0.02),
|
468 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: dtype=torch.bfloat16,
|
469 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: make_vocab_size_divisible_by=1,
|
470 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: ddp_bucket_cap_mb=25),
|
471 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
|
472 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: tokenizer_revision=None,
|
473 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: tokenizer_max_length=None),
|
474 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
|
475 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: checkpoint_interval=10000,
|
476 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: save_initial_state=False,
|
477 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: save_final_state=False,
|
478 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: resume_checkpoint_path=None,
|
479 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: checkpoints_path_is_shared_file_system=False),
|
480 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: logging=LoggingArgs(log_level='info',
|
481 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: log_level_replica='info',
|
482 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: iteration_step_info_interval=1),
|
483 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: tokens=TokensArgs(sequence_length=2048,
|
484 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: train_steps=100,
|
485 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: micro_batch_size=64,
|
486 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: batch_accumulation_per_replica=1,
|
487 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: val_check_interval=100,
|
488 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: limit_val_batches=0,
|
489 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: limit_test_batches=0),
|
490 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
491 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: adam_beta1=0.9,
|
492 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: adam_beta2=0.95,
|
493 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: torch_adam_is_fused=True,
|
494 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: name='adamW'),
|
495 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: zero_stage=1,
|
496 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: weight_decay=0.01,
|
497 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: clip_grad=1.0,
|
498 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: accumulate_grad_in_fp32=True,
|
499 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
|
500 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: lr_warmup_steps=2,
|
501 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: lr_warmup_style='linear',
|
502 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: lr_decay_style='cosine',
|
503 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: lr_decay_steps=13,
|
504 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: lr_decay_starting_step=None,
|
505 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: min_decay_lr=1e-05)),
|
506 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
|
507 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: start_training_step=1,
|
508 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: data=DataArgs(dataset=None,
|
509 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: seed=42,
|
510 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: num_loading_workers=1))],
|
511 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: profiler=None,
|
512 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: lighteval=None,
|
513 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: s3_upload=None)
|
514 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: Model Config:
|
515 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: LlamaConfig(bos_token_id=0,
|
516 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: eos_token_id=0,
|
517 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: hidden_act='silu',
|
518 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: hidden_size=2048,
|
519 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: initializer_range=0.02,
|
520 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: intermediate_size=8192,
|
521 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: is_llama_config=True,
|
522 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: max_position_embeddings=2048,
|
523 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: num_attention_heads=32,
|
524 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: num_hidden_layers=16,
|
525 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: num_key_value_heads=8,
|
526 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: pad_token_id=None,
|
527 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: pretraining_tp=1,
|
528 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: rms_norm_eps=1e-05,
|
529 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: rope_scaling=None,
|
530 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: rope_theta=10000.0,
|
531 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: rope_interleaved=False,
|
532 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: tie_word_embeddings=True,
|
533 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: use_cache=True,
|
534 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: vocab_size=32768)
|
535 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: Building model..
|
536 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: Initialize RoPE Theta = 10000.0
|
537 |
+
12/09/2024 19:09:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: Setting PP block ranks...
|
538 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: Total number of parameters: 1.04G (1984.13MiB)
|
539 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: Local number of parameters: 1.04G (1984.13MiB)
|
540 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [After model building] Memory usage: 1984.15MiB. Peak allocated: 5632.00MiB Peak reserved: 16898.00MiB
|
541 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: No checkpoint path provided.
|
542 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: Parametrizing model parameters using StandardParametrizator
|
543 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [Optimizer Building] Using LearningRateForSP as learning rate
|
544 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [ZeRO sharding] Size of optimizer params per rank:
|
545 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [ZeRO sharding] DP Rank 0 has 130M out of 1.04G (12.50%) params' optimizer states
|
546 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [ZeRO sharding] DP Rank 1 has 130M out of 1.04G (12.50%) params' optimizer states
|
547 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [ZeRO sharding] DP Rank 2 has 130M out of 1.04G (12.50%) params' optimizer states
|
548 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [ZeRO sharding] DP Rank 3 has 130M out of 1.04G (12.50%) params' optimizer states
|
549 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [ZeRO sharding] DP Rank 4 has 130M out of 1.04G (12.50%) params' optimizer states
|
550 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [ZeRO sharding] DP Rank 5 has 130M out of 1.04G (12.50%) params' optimizer states
|
551 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [ZeRO sharding] DP Rank 6 has 130M out of 1.04G (12.50%) params' optimizer states
|
552 |
+
12/09/2024 19:09:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [ZeRO sharding] DP Rank 7 has 130M out of 1.04G (12.50%) params' optimizer states
|
553 |
+
12/09/2024 19:09:03 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
|
554 |
+
12/09/2024 19:09:03 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: Using dummy data generator
|
555 |
+
12/09/2024 19:09:03 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [Training Plan] There are 1 training stages
|
556 |
+
12/09/2024 19:09:03 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [Stage Stable Training Stage] start from step 1
|
557 |
+
12/09/2024 19:09:03 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]:
|
558 |
+
12/09/2024 19:09:03 [INFO|DP=0|PP=0|TP=0|ip-26-0-173-121]: [Start training] datetime: 2024-12-09 19:09:03.192808 | mbs: 64 | grad_accum: 1 | global_batch_size: 512 | sequence_length: 2048 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
|
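As a quick sanity check on the batch geometry in the banner above, a minimal sketch (using only the values printed there: dp=8, mbs=64, grad_accum=1, seq=2048) confirms the reported global batch size and the implied tokens per step:

```python
# Sanity check of the reported batch geometry; values are taken from the [Start training] banner above.
dp, mbs, grad_accum, seq_len = 8, 64, 1, 2048

global_batch_size = dp * mbs * grad_accum       # 8 * 64 * 1 = 512, as logged
tokens_per_step = global_batch_size * seq_len   # 512 * 2048 = 1,048,576 tokens per step

print(global_batch_size, tokens_per_step)       # 512 1048576
```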
559 |
+
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory.
|
560 |
+
Traceback (most recent call last):
|
561 |
+
Traceback (most recent call last):
|
562 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
|
563 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
|
564 |
+
trainer.train(dataloader)
|
565 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
|
566 |
+
trainer.train(dataloader)
|
567 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
|
568 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
569 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
|
570 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
571 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
|
572 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
573 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
574 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
575 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
576 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
577 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
578 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
579 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
580 |
+
output = model(**micro_batch)
|
581 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
582 |
+
output = model(**micro_batch)
|
583 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
584 |
+
return self._call_impl(*args, **kwargs)
|
585 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
586 |
+
return self._call_impl(*args, **kwargs)
|
587 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
588 |
+
return forward_call(*args, **kwargs)
|
589 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
590 |
+
return forward_call(*args, **kwargs)
|
591 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
592 |
+
sharded_logits = self.model(
|
593 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
594 |
+
sharded_logits = self.model(
|
595 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
596 |
+
return self._call_impl(*args, **kwargs)
|
597 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
598 |
+
return self._call_impl(*args, **kwargs)
|
599 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
600 |
+
return forward_call(*args, **kwargs)
|
601 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
602 |
+
return forward_call(*args, **kwargs)
|
603 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
604 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
605 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
606 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
607 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
608 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
609 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
610 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
611 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
612 |
+
return self._call_impl(*args, **kwargs)
|
613 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
614 |
+
return self._call_impl(*args, **kwargs)
|
615 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
616 |
+
return forward_call(*args, **kwargs)
|
617 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
618 |
+
return forward_call(*args, **kwargs)
|
619 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
620 |
+
output = self.pp_block(**new_kwargs)
|
621 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
622 |
+
output = self.pp_block(**new_kwargs)
|
623 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
624 |
+
return self._call_impl(*args, **kwargs)
|
625 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
626 |
+
return self._call_impl(*args, **kwargs)
|
627 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
628 |
+
Traceback (most recent call last):
|
629 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
|
630 |
+
return forward_call(*args, **kwargs)
|
631 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
632 |
+
trainer.train(dataloader)
|
633 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
|
634 |
+
return forward_call(*args, **kwargs)
|
635 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
636 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
637 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
638 |
+
Traceback (most recent call last):
|
639 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
|
640 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
641 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
|
642 |
+
trainer.train(dataloader)
|
643 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
|
644 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
645 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask) File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
646 |
+
|
647 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
648 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
649 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
|
650 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
651 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
652 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
653 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
654 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
655 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
656 |
+
return self._call_impl(*args, **kwargs)
|
657 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
658 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
659 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
660 |
+
return self._call_impl(*args, **kwargs)
|
661 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
662 |
+
return forward_call(*args, **kwargs)
|
663 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward
|
664 |
+
output = model(**micro_batch)
|
665 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
666 |
+
return forward_call(*args, **kwargs)
|
667 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward
|
668 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
669 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
670 |
+
merged_states = self.gate_up_proj(hidden_states)
|
671 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
672 |
+
return self._call_impl(*args, **kwargs)merged_states = self.gate_up_proj(hidden_states)
|
673 |
+
|
674 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
675 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
676 |
+
return self._call_impl(*args, **kwargs)
|
677 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
678 |
+
return forward_call(*args, **kwargs)return self._call_impl(*args, **kwargs)
|
679 |
+
|
680 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
681 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
682 |
+
return forward_call(*args, **kwargs)output = model(**micro_batch)
|
683 |
+
|
684 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward
|
685 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
686 |
+
return forward_call(*args, **kwargs)
|
687 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward
|
688 |
+
return self._call_impl(*args, **kwargs)
|
689 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
690 |
+
sharded_logits = self.model(
|
691 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
692 |
+
return column_linear(
|
693 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear
|
694 |
+
return forward_call(*args, **kwargs)
|
695 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
696 |
+
return column_linear(
|
697 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear
|
698 |
+
return self._call_impl(*args, **kwargs)
|
699 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
700 |
+
sharded_logits = self.model(
|
701 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
702 |
+
return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)
|
703 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
704 |
+
return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)
|
705 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
706 |
+
return forward_call(*args, **kwargs)
|
707 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
708 |
+
return self._call_impl(*args, **kwargs)
|
709 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
710 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
711 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
712 |
+
return forward_call(*args, **kwargs)
|
713 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
714 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
715 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper
|
716 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
717 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
718 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
719 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper
|
720 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
721 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
722 |
+
return func(*args, **kwargs)
|
723 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward
|
724 |
+
return self._call_impl(*args, **kwargs)
|
725 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
726 |
+
return func(*args, **kwargs)
|
727 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward
|
728 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
729 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
730 |
+
return F.linear(gathered_tensor, weight, bias)
|
731 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU 5 has a total capacty of 79.33 GiB of which 3.98 GiB is free. Including non-PyTorch memory, this process has 75.34 GiB memory in use. Of the allocated memory 66.43 GiB is allocated by PyTorch, and 851.80 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
732 |
+
return forward_call(*args, **kwargs)
|
733 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
734 |
+
return F.linear(gathered_tensor, weight, bias)
|
735 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU 4 has a total capacty of 79.33 GiB of which 3.98 GiB is free. Including non-PyTorch memory, this process has 75.34 GiB memory in use. Of the allocated memory 66.43 GiB is allocated by PyTorch, and 851.80 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
736 |
+
return self._call_impl(*args, **kwargs)
|
737 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
738 |
+
output = self.pp_block(**new_kwargs)
|
739 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
740 |
+
return forward_call(*args, **kwargs)
|
741 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
742 |
+
return self._call_impl(*args, **kwargs)
|
743 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
744 |
+
output = self.pp_block(**new_kwargs)
|
745 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
746 |
+
return forward_call(*args, **kwargs)
|
747 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
748 |
+
return self._call_impl(*args, **kwargs)
|
749 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
750 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
751 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
752 |
+
return forward_call(*args, **kwargs)
|
753 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
754 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
755 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
756 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
757 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
758 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
759 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
760 |
+
return self._call_impl(*args, **kwargs)
|
761 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
762 |
+
Traceback (most recent call last):
|
763 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
|
764 |
+
return self._call_impl(*args, **kwargs)
|
765 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
766 |
+
return forward_call(*args, **kwargs)
|
767 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward
|
768 |
+
return forward_call(*args, **kwargs)
|
769 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward
|
770 |
+
trainer.train(dataloader)
|
771 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
|
772 |
+
merged_states = self.gate_up_proj(hidden_states)
|
773 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
774 |
+
merged_states = self.gate_up_proj(hidden_states)
|
775 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
776 |
+
Traceback (most recent call last):
|
777 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
|
778 |
+
return self._call_impl(*args, **kwargs)
|
779 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
780 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
781 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
|
782 |
+
return self._call_impl(*args, **kwargs)
|
783 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
784 |
+
trainer.train(dataloader)
|
785 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
|
786 |
+
return forward_call(*args, **kwargs)
|
787 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward
|
788 |
+
Traceback (most recent call last):
|
789 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
790 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
|
791 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
792 |
+
return forward_call(*args, **kwargs)
|
793 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward
|
794 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
795 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
|
796 |
+
return column_linear(
|
797 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear
|
798 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
799 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
800 |
+
trainer.train(dataloader)
|
801 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
|
802 |
+
return column_linear(
|
803 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear
|
804 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
805 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
806 |
+
return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)
|
807 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
808 |
+
output = model(**micro_batch)
|
809 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
810 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
811 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
|
812 |
+
return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)
|
813 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
814 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
815 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
816 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
817 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper
|
818 |
+
return self._call_impl(*args, **kwargs)
|
819 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
820 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
821 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
822 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
823 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper
|
824 |
+
output = model(**micro_batch)
|
825 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
826 |
+
return func(*args, **kwargs)
|
827 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward
|
828 |
+
return forward_call(*args, **kwargs)
|
829 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
830 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
831 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
832 |
+
return func(*args, **kwargs)
|
833 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward
|
834 |
+
return self._call_impl(*args, **kwargs)
|
835 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
836 |
+
return F.linear(gathered_tensor, weight, bias)
|
837 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU 6 has a total capacty of 79.33 GiB of which 3.98 GiB is free. Including non-PyTorch memory, this process has 75.34 GiB memory in use. Of the allocated memory 66.43 GiB is allocated by PyTorch, and 851.80 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
838 |
+
sharded_logits = self.model(
|
839 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
840 |
+
return forward_call(*args, **kwargs)
|
841 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
842 |
+
output = model(**micro_batch)
|
843 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
844 |
+
return F.linear(gathered_tensor, weight, bias)
|
845 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU 1 has a total capacty of 79.33 GiB of which 3.98 GiB is free. Including non-PyTorch memory, this process has 75.34 GiB memory in use. Of the allocated memory 66.43 GiB is allocated by PyTorch, and 851.80 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
846 |
+
return self._call_impl(*args, **kwargs)
|
847 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
848 |
+
sharded_logits = self.model(
|
849 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
850 |
+
return self._call_impl(*args, **kwargs)
|
851 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
852 |
+
return forward_call(*args, **kwargs)
|
853 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
854 |
+
return self._call_impl(*args, **kwargs)
|
855 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
856 |
+
return forward_call(*args, **kwargs)
|
857 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
858 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
859 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
860 |
+
return forward_call(*args, **kwargs)
|
861 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
862 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
863 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
864 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
865 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
866 |
+
sharded_logits = self.model(
|
867 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
868 |
+
return self._call_impl(*args, **kwargs)
|
869 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
870 |
+
return forward_call(*args, **kwargs)
|
871 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
872 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
873 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
874 |
+
return self._call_impl(*args, **kwargs)
|
875 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
876 |
+
output = self.pp_block(**new_kwargs)
|
877 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
878 |
+
return self._call_impl(*args, **kwargs)
|
879 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
880 |
+
return forward_call(*args, **kwargs)
|
881 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
882 |
+
return forward_call(*args, **kwargs)
|
883 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
884 |
+
return self._call_impl(*args, **kwargs)
|
885 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
886 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
887 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
888 |
+
output = self.pp_block(**new_kwargs)
|
889 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
890 |
+
return forward_call(*args, **kwargs)
|
891 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
892 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
893 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
894 |
+
return self._call_impl(*args, **kwargs)
|
895 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
896 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
897 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
898 |
+
return self._call_impl(*args, **kwargs)
|
899 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
900 |
+
return forward_call(*args, **kwargs)
|
901 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
902 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
903 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
904 |
+
return forward_call(*args, **kwargs)
|
905 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
906 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
907 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
908 |
+
return self._call_impl(*args, **kwargs)
|
909 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
910 |
+
output = self.pp_block(**new_kwargs)
|
911 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
912 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
913 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
914 |
+
return forward_call(*args, **kwargs)
|
915 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward
|
916 |
+
return self._call_impl(*args, **kwargs)
|
917 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
918 |
+
return self._call_impl(*args, **kwargs)
|
919 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
920 |
+
merged_states = self.gate_up_proj(hidden_states)
|
921 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
922 |
+
return forward_call(*args, **kwargs)
|
923 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
924 |
+
return forward_call(*args, **kwargs)
|
925 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
926 |
+
return self._call_impl(*args, **kwargs)
|
927 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
928 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
929 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
930 |
+
return forward_call(*args, **kwargs)
|
931 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward
|
932 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
933 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
934 |
+
return self._call_impl(*args, **kwargs)
|
935 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
936 |
+
return column_linear(
|
937 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear
|
938 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
939 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
940 |
+
return forward_call(*args, **kwargs)
|
941 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 201, in forward
|
942 |
+
return self._call_impl(*args, **kwargs)
|
943 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
944 |
+
return forward_call(*args, **kwargs)
|
945 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward
|
946 |
+
merged_states = self.gate_up_proj(hidden_states)
|
947 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
948 |
+
return self._call_impl(*args, **kwargs)
|
949 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
950 |
+
return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)
|
951 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
952 |
+
return forward_call(*args, **kwargs)
|
953 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward
|
954 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
955 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper
|
956 |
+
return column_linear(
|
957 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear
|
958 |
+
return func(*args, **kwargs)
|
959 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward
|
960 |
+
return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)
|
961 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
962 |
+
return F.linear(gathered_tensor, weight, bias)
|
963 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU 3 has a total capacty of 79.33 GiB of which 3.98 GiB is free. Including non-PyTorch memory, this process has 75.34 GiB memory in use. Of the allocated memory 66.43 GiB is allocated by PyTorch, and 851.80 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
964 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
965 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper
|
966 |
+
return func(*args, **kwargs)
|
967 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward
|
968 |
+
return F.linear(gathered_tensor, weight, bias)
|
969 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU 2 has a total capacty of 79.33 GiB of which 3.98 GiB is free. Including non-PyTorch memory, this process has 75.34 GiB memory in use. Of the allocated memory 66.43 GiB is allocated by PyTorch, and 851.80 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
970 |
+
return self.act(gate_states) * up_states
|
971 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
972 |
+
return self._call_impl(*args, **kwargs)
|
973 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
974 |
+
return forward_call(*args, **kwargs)
|
975 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/nn/activations.py", line 149, in forward
|
976 |
+
return nn.functional.silu(input)
|
977 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/functional.py", line 2072, in silu
|
978 |
+
return torch._C._nn.silu(input)
|
979 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 7 has a total capacty of 79.33 GiB of which 699.94 MiB is free. Including non-PyTorch memory, this process has 78.63 GiB memory in use. Of the allocated memory 70.43 GiB is allocated by PyTorch, and 851.80 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
980 |
+
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory
|
981 |
+
[2024-12-09 19:09:07,412] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 680780 closing signal SIGTERM
|
982 |
+
[2024-12-09 19:09:07,977] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 1 (pid: 680781) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
|
983 |
+
Traceback (most recent call last):
|
984 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
985 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
986 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
987 |
+
return f(*args, **kwargs)
|
988 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
989 |
+
run(args)
|
990 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
991 |
+
elastic_launch(
|
992 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
993 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
994 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
|
995 |
+
raise ChildFailedError(
|
996 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
997 |
+
============================================================
|
998 |
+
run_train.py FAILED
|
999 |
+
------------------------------------------------------------
|
1000 |
+
Failures:
|
1001 |
+
[1]:
|
1002 |
+
time : 2024-12-09_19:09:07
|
1003 |
+
host : ip-26-0-173-121.ec2.internal
|
1004 |
+
rank : 2 (local_rank: 2)
|
1005 |
+
exitcode : 1 (pid: 680782)
|
1006 |
+
error_file: <N/A>
|
1007 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
1008 |
+
[2]:
|
1009 |
+
time : 2024-12-09_19:09:07
|
1010 |
+
host : ip-26-0-173-121.ec2.internal
|
1011 |
+
rank : 3 (local_rank: 3)
|
1012 |
+
exitcode : 1 (pid: 680783)
|
1013 |
+
error_file: <N/A>
|
1014 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
1015 |
+
[3]:
|
1016 |
+
time : 2024-12-09_19:09:07
|
1017 |
+
host : ip-26-0-173-121.ec2.internal
|
1018 |
+
rank : 4 (local_rank: 4)
|
1019 |
+
exitcode : 1 (pid: 680784)
|
1020 |
+
error_file: <N/A>
|
1021 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
1022 |
+
[4]:
|
1023 |
+
time : 2024-12-09_19:09:07
|
1024 |
+
host : ip-26-0-173-121.ec2.internal
|
1025 |
+
rank : 5 (local_rank: 5)
|
1026 |
+
exitcode : 1 (pid: 680785)
|
1027 |
+
error_file: <N/A>
|
1028 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
1029 |
+
[5]:
|
1030 |
+
time : 2024-12-09_19:09:07
|
1031 |
+
host : ip-26-0-173-121.ec2.internal
|
1032 |
+
rank : 6 (local_rank: 6)
|
1033 |
+
exitcode : 1 (pid: 680786)
|
1034 |
+
error_file: <N/A>
|
1035 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
1036 |
+
[6]:
|
1037 |
+
time : 2024-12-09_19:09:07
|
1038 |
+
host : ip-26-0-173-121.ec2.internal
|
1039 |
+
rank : 7 (local_rank: 7)
|
1040 |
+
exitcode : 1 (pid: 680787)
|
1041 |
+
error_file: <N/A>
|
1042 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
1043 |
+
------------------------------------------------------------
|
1044 |
+
Root Cause (first observed failure):
|
1045 |
+
[0]:
|
1046 |
+
time : 2024-12-09_19:09:07
|
1047 |
+
host : ip-26-0-173-121.ec2.internal
|
1048 |
+
rank : 1 (local_rank: 1)
|
1049 |
+
exitcode : 1 (pid: 680781)
|
1050 |
+
error_file: <N/A>
|
1051 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
1052 |
+
============================================================
|
1053 |
+
srun: error: ip-26-0-173-121: task 0: Exited with exit code 1
|
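The OOM above reports 851.80 MiB reserved by PyTorch but unallocated and points at max_split_size_mb / PYTORCH_CUDA_ALLOC_CONF. Below is a minimal sketch of how one might set that allocator option before relaunching; the 512 MiB threshold is an illustrative assumption (not taken from these runs), and it only helps with fragmentation, not with a genuine capacity shortfall like the 70+ GiB of live allocations shown here.

# Hedged sketch: cap the CUDA caching-allocator split size to reduce fragmentation.
# The 512 MiB value is an assumed example, not a setting used in these benchmarks;
# PYTORCH_CUDA_ALLOC_CONF must be set before the first CUDA allocation happens.
import os

os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:512")

import torch  # imported after setting the env var so the allocator picks it up

if torch.cuda.is_available():
    # Report the same quantities the OOM message cites (allocated vs. reserved).
    free, total = torch.cuda.mem_get_info()
    print(f"free={free / 2**30:.2f} GiB, total={total / 2**30:.2f} GiB")
    print(f"allocated={torch.cuda.memory_allocated() / 2**30:.2f} GiB, "
          f"reserved={torch.cuda.memory_reserved() / 2**30:.2f} GiB")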
logs/13212476-bench_1.34G_dp8_tp1_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab131k.out
ADDED
@@ -0,0 +1,1065 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames ip-26-0-172-73
|
400 |
+
+ export NODELIST=ip-26-0-172-73
|
401 |
+
+ NODELIST=ip-26-0-172-73
|
402 |
+
++ scontrol show hostnames ip-26-0-172-73
|
403 |
+
++ head -n1
|
404 |
+
+ export MASTER_NODE=ip-26-0-172-73
|
405 |
+
+ MASTER_NODE=ip-26-0-172-73
|
406 |
+
+ export MASTER_PORT=12356
|
407 |
+
+ MASTER_PORT=12356
|
408 |
+
+ export NNODES=1
|
409 |
+
+ NNODES=1
|
410 |
+
+ export GPUS_PER_NODE=8
|
411 |
+
+ GPUS_PER_NODE=8
|
412 |
+
+ export WORLD_SIZE=8
|
413 |
+
+ WORLD_SIZE=8
|
414 |
+
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
|
415 |
+
+ CUDA_DEVICE_MAX_CONNECTIONS=1
|
416 |
+
+ export NANOTRON_BENCHMARK=1
|
417 |
+
+ NANOTRON_BENCHMARK=1
|
418 |
+
+ echo 'Master node: ip-26-0-172-73'
|
419 |
+
Master node: ip-26-0-172-73
|
420 |
+
+ echo 'All nodes: ip-26-0-172-73'
|
421 |
+
All nodes: ip-26-0-172-73
|
422 |
+
+ echo 'World size: 8'
|
423 |
+
World size: 8
|
424 |
+
+ srun torchrun --nnodes=1 --nproc_per_node=8 --rdzv_id=13212476 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-172-73:12356 run_train.py --config-file benchmark/configs/config_1.34G_dp8_tp1_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml
|
425 |
+
[2024-12-09 19:11:58,813] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
426 |
+
[2024-12-09 19:11:58,813] torch.distributed.run: [WARNING]
|
427 |
+
[2024-12-09 19:11:58,813] torch.distributed.run: [WARNING] *****************************************
|
428 |
+
[2024-12-09 19:11:58,813] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
429 |
+
[2024-12-09 19:11:58,813] torch.distributed.run: [WARNING] *****************************************
|
430 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: Config:
|
431 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: Config(general=GeneralArgs(project='debug',
|
432 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: run='1.34G_dp8_tp1_pp1_acc8_mbs2_seq32768_zero1_tpmodeRED_vocab131k',
|
433 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: seed=42,
|
434 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: step=None,
|
435 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: consumed_train_samples=None,
|
436 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: benchmark_csv_path=PosixPath('benchmark/results/bench_final.csv'),
|
437 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: ignore_sanity_checks=True),
|
438 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: parallelism=ParallelismArgs(dp=8,
|
439 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: pp=1,
|
440 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: tp=1,
|
441 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f302c6b4e80>,
|
442 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
|
443 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: tp_linear_async_communication=True,
|
444 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: recompute_layer=False,
|
445 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: tp_recompute_allgather=True,
|
446 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: expert_parallel_size=1),
|
447 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
|
448 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: eos_token_id=0,
|
449 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: hidden_act='silu',
|
450 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: hidden_size=2048,
|
451 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: initializer_range=0.02,
|
452 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: intermediate_size=8192,
|
453 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: is_llama_config=True,
|
454 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: max_position_embeddings=32768,
|
455 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: num_attention_heads=32,
|
456 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: num_hidden_layers=16,
|
457 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: num_key_value_heads=8,
|
458 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: pad_token_id=None,
|
459 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: pretraining_tp=1,
|
460 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: rms_norm_eps=1e-05,
|
461 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: rope_scaling=None,
|
462 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: rope_theta=10000.0,
|
463 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: rope_interleaved=False,
|
464 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: tie_word_embeddings=True,
|
465 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: use_cache=True,
|
466 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: vocab_size=131072),
|
467 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: init_method=RandomInit(std=0.02),
|
468 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: dtype=torch.bfloat16,
|
469 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: make_vocab_size_divisible_by=1,
|
470 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: ddp_bucket_cap_mb=25),
|
471 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
|
472 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: tokenizer_revision=None,
|
473 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: tokenizer_max_length=None),
|
474 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
|
475 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: checkpoint_interval=10000,
|
476 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: save_initial_state=False,
|
477 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: save_final_state=False,
|
478 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: resume_checkpoint_path=None,
|
479 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: checkpoints_path_is_shared_file_system=False),
|
480 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: logging=LoggingArgs(log_level='info',
|
481 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: log_level_replica='info',
|
482 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: iteration_step_info_interval=1),
|
483 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: tokens=TokensArgs(sequence_length=32768,
|
484 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: train_steps=100,
|
485 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: micro_batch_size=2,
|
486 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: batch_accumulation_per_replica=8,
|
487 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: val_check_interval=100,
|
488 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: limit_val_batches=0,
|
489 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: limit_test_batches=0),
|
490 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
491 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: adam_beta1=0.9,
|
492 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: adam_beta2=0.95,
|
493 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: torch_adam_is_fused=True,
|
494 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: name='adamW'),
|
495 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: zero_stage=1,
|
496 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: weight_decay=0.01,
|
497 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: clip_grad=1.0,
|
498 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: accumulate_grad_in_fp32=True,
|
499 |
+
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: lr_warmup_steps=2,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: lr_warmup_style='linear',
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: lr_decay_style='cosine',
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: lr_decay_steps=13,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: lr_decay_starting_step=None,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: min_decay_lr=1e-05)),
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: start_training_step=1,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: data=DataArgs(dataset=None,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: seed=42,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: num_loading_workers=1))],
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: profiler=None,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: lighteval=None,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: s3_upload=None)
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: Model Config:
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: LlamaConfig(bos_token_id=0,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: eos_token_id=0,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: hidden_act='silu',
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: hidden_size=2048,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: initializer_range=0.02,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: intermediate_size=8192,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: is_llama_config=True,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: max_position_embeddings=32768,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: num_attention_heads=32,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: num_hidden_layers=16,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: num_key_value_heads=8,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: pad_token_id=None,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: pretraining_tp=1,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: rms_norm_eps=1e-05,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: rope_scaling=None,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: rope_theta=10000.0,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: rope_interleaved=False,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: tie_word_embeddings=True,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: use_cache=True,
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: vocab_size=131072)
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: Building model..
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: Initialize RoPE Theta = 10000.0
12/09/2024 19:12:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: Setting PP block ranks...
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: Total number of parameters: 1.24G (2368.13MiB)
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: Local number of parameters: 1.24G (2368.13MiB)
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [After model building] Memory usage: 2368.15MiB. Peak allocated: 5632.00MiB Peak reserved: 17922.00MiB
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: No checkpoint path provided.
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: Parametrizing model parameters using StandardParametrizator
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [Optimizer Building] Using LearningRateForSP as learning rate
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [ZeRO sharding] Size of optimizer params per rank:
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [ZeRO sharding] DP Rank 0 has 155M out of 1.24G (12.50%) params' optimizer states
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [ZeRO sharding] DP Rank 1 has 155M out of 1.24G (12.50%) params' optimizer states
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [ZeRO sharding] DP Rank 2 has 155M out of 1.24G (12.50%) params' optimizer states
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [ZeRO sharding] DP Rank 3 has 155M out of 1.24G (12.50%) params' optimizer states
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [ZeRO sharding] DP Rank 4 has 155M out of 1.24G (12.50%) params' optimizer states
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [ZeRO sharding] DP Rank 5 has 155M out of 1.24G (12.50%) params' optimizer states
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [ZeRO sharding] DP Rank 6 has 155M out of 1.24G (12.50%) params' optimizer states
12/09/2024 19:12:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [ZeRO sharding] DP Rank 7 has 155M out of 1.24G (12.50%) params' optimizer states
12/09/2024 19:12:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
12/09/2024 19:12:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: Using dummy data generator
12/09/2024 19:12:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [Training Plan] There are 1 training stages
12/09/2024 19:12:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [Stage Stable Training Stage] start from step 1
12/09/2024 19:12:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]:
12/09/2024 19:12:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-172-73]: [Start training] datetime: 2024-12-09 19:12:35.336949 | mbs: 2 | grad_accum: 8 | global_batch_size: 128 | sequence_length: 32768 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
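A rough cross-check of the parameter count and ZeRO-1 sharding figures reported above, assuming the standard Llama layout implied by the printed config (GQA attention, gated SiLU MLP, RMSNorm, tied embeddings); the helper below is illustrative only and is not part of nanotron or of this log.

# Hypothetical sanity check: recompute the reported 1.24G parameters / ~2368 MiB (bf16)
# from the LlamaConfig values printed in the log above.
def llama_param_count(hidden=2048, layers=16, heads=32, kv_heads=8,
                      intermediate=8192, vocab=131072, tied_embeddings=True):
    head_dim = hidden // heads                                      # 64
    kv_dim = kv_heads * head_dim                                    # 512 (GQA: 8 KV heads)
    attn = hidden * hidden + 2 * hidden * kv_dim + hidden * hidden  # q, k/v, o projections
    mlp = hidden * (2 * intermediate) + intermediate * hidden       # gate_up_proj + down_proj
    norms = 2 * hidden                                              # two RMSNorms per layer
    embed = vocab * hidden * (1 if tied_embeddings else 2)
    return embed + layers * (attn + mlp + norms) + hidden           # + final RMSNorm

n = llama_param_count()
print(f"{n / 1e9:.2f}G params, {n * 2 / 2**20:.0f} MiB in bf16")    # ~1.24G, ~2368 MiB
print(f"ZeRO-1 shard per DP rank: {n / 8 / 1e6:.0f}M (12.5%)")      # ~155M, matches the log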
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory.
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory
Traceback (most recent call last):
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
trainer.train(dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
outputs = self.pipeline_engine.train_batch_iter(
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
output = model(**micro_batch)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
sharded_logits = self.model(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
hidden_encoder_states = encoder_block(**hidden_encoder_states)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
output = self.pp_block(**new_kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 725, in _core_forward
output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 679, in forward
attention_output = self.attention(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/utils.py", line 115, in wrapper
return func(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 286, in forward
attn_output = flash_attn_varlen_func(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/flash_attn/flash_attn_interface.py", line 1059, in flash_attn_varlen_func
return FlashAttnVarlenFunc.apply(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
return super().apply(*args, **kwargs) # type: ignore[misc]
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/flash_attn/flash_attn_interface.py", line 576, in forward
out, q, k, v, out_padded, softmax_lse, S_dmask, rng_state = _flash_attn_varlen_forward(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/flash_attn/flash_attn_interface.py", line 85, in _flash_attn_varlen_forward
out, q, k, v, out_padded, softmax_lse, S_dmask, rng_state = flash_attn_cuda.varlen_fwd(
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 MiB. GPU 4 has a total capacty of 79.33 GiB of which 121.94 MiB is free. Including non-PyTorch memory, this process has 79.20 GiB memory in use. Of the allocated memory 70.81 GiB is allocated by PyTorch, and 320.66 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 MiB. GPU 1 has a total capacty of 79.33 GiB of which 121.94 MiB is free. Including non-PyTorch memory, this process has 79.20 GiB memory in use. Of the allocated memory 70.81 GiB is allocated by PyTorch, and 320.66 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 MiB. GPU 3 has a total capacty of 79.33 GiB of which 121.94 MiB is free. Including non-PyTorch memory, this process has 79.20 GiB memory in use. Of the allocated memory 70.81 GiB is allocated by PyTorch, and 320.66 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 MiB. GPU 5 has a total capacty of 79.33 GiB of which 121.94 MiB is free. Including non-PyTorch memory, this process has 79.20 GiB memory in use. Of the allocated memory 70.81 GiB is allocated by PyTorch, and 320.66 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
Traceback (most recent call last):
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
trainer.train(dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
outputs = self.pipeline_engine.train_batch_iter(
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
output = model(**micro_batch)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
sharded_logits = self.model(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
hidden_encoder_states = encoder_block(**hidden_encoder_states)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
output = self.pp_block(**new_kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward
merged_states = self.gate_up_proj(hidden_states)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward
return column_linear(
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 441, in column_linear
return _ColumnLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode, tp_recompute_allgather)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
return super().apply(*args, **kwargs) # type: ignore[misc]
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/utils.py", line 40, in wrapper
return func(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 140, in forward
return F.linear(gathered_tensor, weight, bias)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 7 has a total capacty of 79.33 GiB of which 73.94 MiB is free. Including non-PyTorch memory, this process has 79.25 GiB memory in use. Of the allocated memory 71.44 GiB is allocated by PyTorch, and 440.41 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 MiB. GPU 2 has a total capacty of 79.33 GiB of which 121.94 MiB is free. Including non-PyTorch memory, this process has 79.20 GiB memory in use. Of the allocated memory 70.81 GiB is allocated by PyTorch, and 320.66 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 MiB. GPU 6 has a total capacty of 79.33 GiB of which 121.94 MiB is free. Including non-PyTorch memory, this process has 79.20 GiB memory in use. Of the allocated memory 70.81 GiB is allocated by PyTorch, and 320.66 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
[2024-12-09 19:12:38,941] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 947736 closing signal SIGTERM
[2024-12-09 19:12:39,505] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 1 (pid: 947737) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
Traceback (most recent call last):
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
return f(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
run(args)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
elastic_launch(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
return launch_agent(self._config, self._entrypoint, list(args))
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
run_train.py FAILED
------------------------------------------------------------
Failures:
[1]:
time : 2024-12-09_19:12:38
host : ip-26-0-172-73.ec2.internal
rank : 2 (local_rank: 2)
exitcode : 1 (pid: 947738)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[2]:
time : 2024-12-09_19:12:38
host : ip-26-0-172-73.ec2.internal
rank : 3 (local_rank: 3)
exitcode : 1 (pid: 947739)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[3]:
time : 2024-12-09_19:12:38
host : ip-26-0-172-73.ec2.internal
rank : 4 (local_rank: 4)
exitcode : 1 (pid: 947740)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[4]:
time : 2024-12-09_19:12:38
host : ip-26-0-172-73.ec2.internal
rank : 5 (local_rank: 5)
exitcode : 1 (pid: 947741)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[5]:
time : 2024-12-09_19:12:38
host : ip-26-0-172-73.ec2.internal
rank : 6 (local_rank: 6)
exitcode : 1 (pid: 947742)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[6]:
time : 2024-12-09_19:12:38
host : ip-26-0-172-73.ec2.internal
rank : 7 (local_rank: 7)
exitcode : 1 (pid: 947743)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
time : 2024-12-09_19:12:38
host : ip-26-0-172-73.ec2.internal
rank : 1 (local_rank: 1)
exitcode : 1 (pid: 947737)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
srun: error: ip-26-0-172-73: task 0: Exited with exit code 1
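A minimal sketch of why the failed allocations above are exactly 256.00 MiB and 2.00 GiB, assuming bf16 (2-byte) activations and the tensor shapes suggested by the tracebacks together with this run's config (mbs: 2, sequence_length: 32768, hidden_size=2048, 32 heads, intermediate_size=8192); the variable names are illustrative only.

# Rough cross-check of the two OOM allocation sizes reported in the log above.
tokens = 2 * 32768                               # mbs * sequence_length per micro-batch
elem = 2                                         # bytes per bf16 element
q_packed = tokens * 32 * (2048 // 32) * elem     # packed [tokens, heads, head_dim] handed to flash_attn_varlen_func
gate_up = tokens * (2 * 8192) * elem             # gate_up_proj output fed to F.linear on the rank that asked for 2 GiB
print(q_packed / 2**20, "MiB")                   # 256.0 -> "Tried to allocate 256.00 MiB"
print(gate_up / 2**30, "GiB")                    # 2.0   -> "Tried to allocate 2.00 GiB"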
logs/13264457-bench_1.34G_dp16_tp1_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff
logs/13264673-bench_1.14G_dp4_tp2_pp1_acc128_mbs1_seq8192_zero1_tpmodeALL_vocab32k.out
ADDED
@@ -0,0 +1,611 @@
+ source /etc/profile.d/modules.sh
++ . /usr/share/modules/init/bash
+++ unset _mlshdbg
+++ '[' 0 = 1 ']'
+++ unset _mlre _mlIFS
+++ '[' -n x ']'
+++ _mlIFS='
'
+++ IFS=' '
+++ '[' -n '' ']'
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
+++ _mlcode='module() {
unset _mlshdbg;
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
case "$-" in
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
*v*) set +v; _mlshdbg='\''v'\'' ;;
*x*) set +x; _mlshdbg='\''x'\'' ;;
*) _mlshdbg='\'''\'' ;;
esac;
fi;
unset _mlre _mlIFS;
if [ -n "${IFS+x}" ]; then
_mlIFS=$IFS;
fi;
IFS='\'' '\'';
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
fi;
_mlrv="MODULES_RUNENV_${_mlv}";
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
fi;
done;
if [ -n "${_mlre:-}" ]; then
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
else
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
fi;
_mlstatus=$?;
if [ -n "${_mlIFS+x}" ]; then
IFS=$_mlIFS;
else
unset IFS;
fi;
unset _mlre _mlv _mlrv _mlIFS;
if [ -n "${_mlshdbg:-}" ]; then
set -$_mlshdbg;
fi;
unset _mlshdbg;
return $_mlstatus;
};
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
ENV=/usr/share/modules/init/profile.sh; export ENV;
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
MODULESHOME=/usr/share/modules; export MODULESHOME;
LOADEDMODULES=; export LOADEDMODULES;
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
test 0;'
+++ _mlret=0
+++ '[' -n x ']'
+++ IFS='
'
+++ unset _mlIFS
+++ unset _mlre _mlv _mlrv
+++ '[' 0 -eq 0 ']'
+++ eval 'module() {
unset _mlshdbg;
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
case "$-" in
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
*v*) set +v; _mlshdbg='\''v'\'' ;;
*x*) set +x; _mlshdbg='\''x'\'' ;;
*) _mlshdbg='\'''\'' ;;
esac;
fi;
unset _mlre _mlIFS;
if [ -n "${IFS+x}" ]; then
_mlIFS=$IFS;
fi;
IFS='\'' '\'';
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
fi;
_mlrv="MODULES_RUNENV_${_mlv}";
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
fi;
done;
if [ -n "${_mlre:-}" ]; then
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
else
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
fi;
_mlstatus=$?;
if [ -n "${_mlIFS+x}" ]; then
IFS=$_mlIFS;
else
unset IFS;
fi;
unset _mlre _mlv _mlrv _mlIFS;
if [ -n "${_mlshdbg:-}" ]; then
set -$_mlshdbg;
fi;
unset _mlshdbg;
return $_mlstatus;
};
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
ENV=/usr/share/modules/init/profile.sh; export ENV;
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
MODULESHOME=/usr/share/modules; export MODULESHOME;
LOADEDMODULES=; export LOADEDMODULES;
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
test 0;'
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
++++ export MODULES_CMD
++++ ENV=/usr/share/modules/init/profile.sh
++++ export ENV
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
++++ export MODULEPATH_modshare
++++ BASH_ENV=/usr/share/modules/init/bash
++++ export BASH_ENV
++++ MODULESHOME=/usr/share/modules
++++ export MODULESHOME
++++ LOADEDMODULES=
++++ export LOADEDMODULES
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
++++ export MODULEPATH
++++ test 0
+++ '[' 0 = 1 ']'
+++ '[' -t 2 ']'
+++ export -f module
+++ export -f switchml
+++ '[' 5 -ge 3 ']'
+++ [[ ehxB =~ i ]]
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
+++ '[' '!' -n '' ']'
+++ MANPATH=:
+++ export MANPATH
++++ manpath
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
+++ unset _mlcode _mlret
+++ '[' -n '' ']'
+ module load cuda/12.1
+ unset _mlshdbg
+ '[' 0 = 1 ']'
+ unset _mlre _mlIFS
+ '[' -n x ']'
+ _mlIFS='
'
+ IFS=' '
+ '[' -n '' ']'
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames ip-26-0-168-95
|
400 |
+
+ export NODELIST=ip-26-0-168-95
|
401 |
+
+ NODELIST=ip-26-0-168-95
|
402 |
+
++ scontrol show hostnames ip-26-0-168-95
|
403 |
+
++ head -n1
|
404 |
+
+ export MASTER_NODE=ip-26-0-168-95
|
405 |
+
+ MASTER_NODE=ip-26-0-168-95
|
406 |
+
+ export MASTER_PORT=12356
|
407 |
+
+ MASTER_PORT=12356
|
408 |
+
+ export NNODES=1
|
409 |
+
+ NNODES=1
|
410 |
+
+ export GPUS_PER_NODE=8
|
411 |
+
+ GPUS_PER_NODE=8
|
412 |
+
+ export WORLD_SIZE=8
|
413 |
+
+ WORLD_SIZE=8
|
414 |
+
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
|
415 |
+
+ CUDA_DEVICE_MAX_CONNECTIONS=1
|
416 |
+
+ export NANOTRON_BENCHMARK=1
|
417 |
+
+ NANOTRON_BENCHMARK=1
|
418 |
+
+ echo 'Master node: ip-26-0-168-95'
|
419 |
+
Master node: ip-26-0-168-95
|
420 |
+
+ echo 'All nodes: ip-26-0-168-95'
|
421 |
+
All nodes: ip-26-0-168-95
|
422 |
+
+ echo 'World size: 8'
|
423 |
+
World size: 8
|
424 |
+
+ srun torchrun --nnodes=1 --nproc_per_node=8 --rdzv_id=13264673 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-168-95:12356 run_train.py --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc128_mbs1_seq8192_zero1_tpmodeALL_vocab32k.yaml
|
425 |
+
[2024-12-09 22:39:58,855] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
426 |
+
[2024-12-09 22:39:58,856] torch.distributed.run: [WARNING]
|
427 |
+
[2024-12-09 22:39:58,856] torch.distributed.run: [WARNING] *****************************************
|
428 |
+
[2024-12-09 22:39:58,856] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
429 |
+
[2024-12-09 22:39:58,856] torch.distributed.run: [WARNING] *****************************************
|
430 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Config:
|
431 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Config(general=GeneralArgs(project='debug',
|
432 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: run='1.14G_dp4_tp2_pp1_acc128_mbs1_seq8192_zero1_tpmodeALL_vocab32k',
|
433 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: seed=42,
|
434 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: step=None,
|
435 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: consumed_train_samples=None,
|
436 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: benchmark_csv_path=PosixPath('benchmark/results/bench_final.csv'),
|
437 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: ignore_sanity_checks=True),
|
438 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: parallelism=ParallelismArgs(dp=4,
|
439 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: pp=1,
|
440 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: tp=2,
|
441 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f1a84878e20>,
|
442 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: tp_mode=<TensorParallelLinearMode.ALL_REDUCE: 1>,
|
443 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: tp_linear_async_communication=False,
|
444 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: recompute_layer=False,
|
445 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: tp_recompute_allgather=True,
|
446 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: expert_parallel_size=1),
|
447 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
|
448 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: eos_token_id=0,
|
449 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: hidden_act='silu',
|
450 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: hidden_size=2048,
|
451 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: initializer_range=0.02,
|
452 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: intermediate_size=8192,
|
453 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: is_llama_config=True,
|
454 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: max_position_embeddings=8192,
|
455 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: num_attention_heads=32,
|
456 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: num_hidden_layers=16,
|
457 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: num_key_value_heads=8,
|
458 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: pad_token_id=None,
|
459 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: pretraining_tp=1,
|
460 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: rms_norm_eps=1e-05,
|
461 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: rope_scaling=None,
|
462 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: rope_theta=10000.0,
|
463 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: rope_interleaved=False,
|
464 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: tie_word_embeddings=True,
|
465 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: use_cache=True,
|
466 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: vocab_size=32768),
|
467 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: init_method=RandomInit(std=0.02),
|
468 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: dtype=torch.bfloat16,
|
469 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: make_vocab_size_divisible_by=1,
|
470 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: ddp_bucket_cap_mb=25),
|
471 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
|
472 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: tokenizer_revision=None,
|
473 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: tokenizer_max_length=None),
|
474 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
|
475 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: checkpoint_interval=10000,
|
476 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: save_initial_state=False,
|
477 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: save_final_state=False,
|
478 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: resume_checkpoint_path=None,
|
479 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: checkpoints_path_is_shared_file_system=False),
|
480 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: logging=LoggingArgs(log_level='info',
|
481 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: log_level_replica='info',
|
482 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: iteration_step_info_interval=1),
|
483 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: tokens=TokensArgs(sequence_length=8192,
|
484 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: train_steps=100,
|
485 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: micro_batch_size=1,
|
486 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: batch_accumulation_per_replica=128,
|
487 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: val_check_interval=100,
|
488 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: limit_val_batches=0,
|
489 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: limit_test_batches=0),
|
490 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
491 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: adam_beta1=0.9,
|
492 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: adam_beta2=0.95,
|
493 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: torch_adam_is_fused=True,
|
494 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: name='adamW'),
|
495 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: zero_stage=1,
|
496 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: weight_decay=0.01,
|
497 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: clip_grad=1.0,
|
498 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: accumulate_grad_in_fp32=True,
|
499 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
|
500 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: lr_warmup_steps=2,
|
501 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: lr_warmup_style='linear',
|
502 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: lr_decay_style='cosine',
|
503 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: lr_decay_steps=13,
|
504 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: lr_decay_starting_step=None,
|
505 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: min_decay_lr=1e-05)),
|
506 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
|
507 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: start_training_step=1,
|
508 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: data=DataArgs(dataset=None,
|
509 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: seed=42,
|
510 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: num_loading_workers=1))],
|
511 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: profiler=None,
|
512 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: lighteval=None,
|
513 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: s3_upload=None)
|
514 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Model Config:
|
515 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: LlamaConfig(bos_token_id=0,
|
516 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: eos_token_id=0,
|
517 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: hidden_act='silu',
|
518 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: hidden_size=2048,
|
519 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: initializer_range=0.02,
|
520 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: intermediate_size=8192,
|
521 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: is_llama_config=True,
|
522 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: max_position_embeddings=8192,
|
523 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: num_attention_heads=32,
|
524 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: num_hidden_layers=16,
|
525 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: num_key_value_heads=8,
|
526 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: pad_token_id=None,
|
527 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: pretraining_tp=1,
|
528 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: rms_norm_eps=1e-05,
|
529 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: rope_scaling=None,
|
530 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: rope_theta=10000.0,
|
531 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: rope_interleaved=False,
|
532 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: tie_word_embeddings=True,
|
533 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: use_cache=True,
|
534 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: vocab_size=32768)
|
535 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Building model..
|
536 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Initialize RoPE Theta = 10000.0
|
537 |
+
12/09/2024 22:40:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Setting PP block ranks...
|
538 |
+
12/09/2024 22:40:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Total number of parameters: 1.04G (1984.26MiB)
|
539 |
+
12/09/2024 22:40:37 [INFO|DP=0|PP=0|TP=1|ip-26-0-168-95]: Local number of parameters: 520M (992.13MiB)
|
540 |
+
12/09/2024 22:40:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Local number of parameters: 520M (992.13MiB)
|
541 |
+
12/09/2024 22:40:37 [INFO|DP=0|PP=0|TP=1|ip-26-0-168-95]: [After model building] Memory usage: 992.15MiB. Peak allocated: 5632.00MiB Peak reserved: 15874.00MiB
|
542 |
+
12/09/2024 22:40:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [After model building] Memory usage: 992.15MiB. Peak allocated: 5632.00MiB Peak reserved: 16898.00MiB
|
543 |
+
12/09/2024 22:40:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: No checkpoint path provided.
|
544 |
+
12/09/2024 22:40:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Parametrizing model parameters using StandardParametrizator
|
545 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [Optimizer Building] Using LearningRateForSP as learning rate
|
546 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [ZeRO sharding] Size of optimizer params per rank:
|
547 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [ZeRO sharding] DP Rank 0 has 130M out of 520M (25.00%) params' optimizer states
|
548 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [ZeRO sharding] DP Rank 1 has 130M out of 520M (25.00%) params' optimizer states
|
549 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [ZeRO sharding] DP Rank 2 has 130M out of 520M (25.00%) params' optimizer states
|
550 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [ZeRO sharding] DP Rank 3 has 130M out of 520M (25.00%) params' optimizer states
|
551 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
|
552 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Using dummy data generator
|
553 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [Training Plan] There are 1 training stages
|
554 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [Stage Stable Training Stage] start from step 1
|
555 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]:
|
556 |
+
12/09/2024 22:40:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: [Start training] datetime: 2024-12-09 22:40:42.900557 | mbs: 1 | grad_accum: 128 | global_batch_size: 512 | sequence_length: 8192 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
|
557 |
+
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory.
|
558 |
+
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory
|
559 |
+
wandb: Tracking run with wandb version 0.16.0
|
560 |
+
wandb: W&B syncing is set to `offline` in this directory.
|
561 |
+
wandb: Run `wandb online` or set WANDB_MODE=online to enable cloud syncing.
|
562 |
+
12/09/2024 22:40:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Resuming training from stage Stable Training Stage, it has trained for 0 samples and has 99 remaining train steps
|
563 |
+
12/09/2024 22:40:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Memory usage: 3472.48MiB. Peak allocated 5632.00MiB. Peak reserved: 18884.00MiB
|
564 |
+
12/09/2024 22:41:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Memory usage: 3552.68MiB. Peak allocated 11434.09MiB. Peak reserved: 13284.00MiB
|
565 |
+
12/09/2024 22:41:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: iteration: 1 / 100 | consumed_tokens: 4.19M | elapsed_time_per_iteration_ms: 15.4K | tokens_per_sec: 272K | tokens_per_sec_per_gpu: 34K | global_batch_size: 512 | lm_loss: 10.8 | lr: 0.00015 | model_tflops_per_gpu: 321 | hardware_tflops_per_gpu: 321 | grad_norm: 0.415 | cuda_memory_allocated: 4.77G | cuda_max_memory_reserved: 15G | hd_total_memory_tb: 312G | hd_used_memory_tb: 69G | hd_free_memory_tb: 243G
|
566 |
+
12/09/2024 22:41:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Memory usage: 4552.86MiB. Peak allocated 6785.15MiB. Peak reserved: 14278.00MiB
|
567 |
+
12/09/2024 22:41:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Memory usage: 4552.92MiB. Peak allocated 12434.33MiB. Peak reserved: 14790.00MiB
|
568 |
+
12/09/2024 22:41:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: iteration: 2 / 100 | consumed_tokens: 8.39M | elapsed_time_per_iteration_ms: 13.6K | tokens_per_sec: 309K | tokens_per_sec_per_gpu: 38.6K | global_batch_size: 512 | lm_loss: 10.8 | lr: 0.0003 | model_tflops_per_gpu: 365 | hardware_tflops_per_gpu: 365 | grad_norm: 0.415 | cuda_memory_allocated: 4.77G | cuda_max_memory_reserved: 15.5G | hd_total_memory_tb: 312G | hd_used_memory_tb: 69G | hd_free_memory_tb: 243G
|
569 |
+
12/09/2024 22:41:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Memory usage: 4552.86MiB. Peak allocated 6785.21MiB. Peak reserved: 14790.00MiB
|
570 |
+
12/09/2024 22:41:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Memory usage: 4552.92MiB. Peak allocated 12434.33MiB. Peak reserved: 14790.00MiB
|
571 |
+
num_params
|
572 |
+
{'total': 1040322560, 'local': 520161280}
|
573 |
+
num_params
|
574 |
+
num_params{'total': 1040322560, 'local': 520161280}
|
575 |
+
|
576 |
+
{'total': 1040322560, 'local': 520161280}
|
577 |
+
num_params
|
578 |
+
num_params{'total': 1040322560, 'local': 520161280}
|
579 |
+
|
580 |
+
{'total': 1040322560, 'local': 520161280}
|
581 |
+
num_params
|
582 |
+
num_params{'total': 1040322560, 'local': 520161280}
|
583 |
+
|
584 |
+
{'total': 1040322560, 'local': 520161280}
|
585 |
+
12/09/2024 22:41:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: iteration: 3 / 100 | consumed_tokens: 12.6M | elapsed_time_per_iteration_ms: 13.6K | tokens_per_sec: 308K | tokens_per_sec_per_gpu: 38.5K | global_batch_size: 512 | lm_loss: 10.8 | lr: 0.000296 | model_tflops_per_gpu: 364 | hardware_tflops_per_gpu: 364 | grad_norm: 0.405 | cuda_memory_allocated: 4.77G | cuda_max_memory_reserved: 15.5G | hd_total_memory_tb: 312G | hd_used_memory_tb: 69G | hd_free_memory_tb: 243G
|
586 |
+
num_params
|
587 |
+
{'total': 1040322560, 'local': 520161280}
|
588 |
+
12/09/2024 22:41:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: | job_id | name | nodes | seq_len | mbs | batch_accum | gbs | mTFLOPs | hTFLOPs | tok/s/gpu | AllReduce (GB/s) | AllGather (GB/s) | ReduceScatter (GB/s) | AR Intra-node (GB/s) | AG Intra-node (GB/s) | RS Intra-node (GB/s) | Mem Alloc (GB) | Mem Res (GB) | dp | pp | tp | pp_engine | tp_mode | tp_async_comm | hidden_size | hidden_act | num_layers | num_heads | num_kv_heads | max_pos | vocab_size | tie_word_embeddings | dtype | zero_stage | ddp_bucket_cap_mb | accumulate_grad_in_fp32 | Total Params | Local Params |
|
589 |
+
12/09/2024 22:41:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: | -------- | -------------------------------------------------------------- | ----- | ------- | --- | ----------- | --- | ------- | ------- | --------- | ---------------- | ---------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------- | ------------ | -- | -- | -- | --------------------------------------------------------------------------------------------------------- | ----------------------------------- | ------------- | ----------- | ---------- | ---------- | --------- | ------------ | ------- | ---------- | ------------------- | -------------- | ---------- | ----------------- | ----------------------- | ------------ | ------------ |
|
590 |
+
srun: Job step aborted: Waiting up to 32 seconds for job step to finish.
|
591 |
+
slurmstepd: error: *** JOB 13264673 ON ip-26-0-168-95 CANCELLED AT 2024-12-09T22:41:33 ***
|
592 |
+
12/09/2024 22:41:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: | 13264673 | 1.14G_dp4_tp2_pp1_acc128_mbs1_seq8192_zero1_tpmodeALL_vocab32k | 1 | 8192 | 1 | 128 | 512 | 364.46 | 364.46 | 38516.67 | 459.89 | 262.47 | 262.59 | 457.00 | 264.79 | 263.88 | 6.63 | 14.44 | 4 | 1 | 2 | <nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f1a84878e20> | TensorParallelLinearMode.ALL_REDUCE | False | 2048 | silu | 16 | 32 | 8 | 8192 | 32768 | True | torch.bfloat16 | 1 | 25 | True | 1.04G | 520M |
|
593 |
+
12/09/2024 22:41:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Throughput logging complete
|
594 |
+
12/09/2024 22:41:33 [INFO|DP=2|PP=0|TP=0|ip-26-0-168-95]: Throughput logging complete
|
595 |
+
12/09/2024 22:41:33 [INFO|DP=1|PP=0|TP=1|ip-26-0-168-95]: Throughput logging complete
|
596 |
+
12/09/2024 22:41:33 [INFO|DP=3|PP=0|TP=1|ip-26-0-168-95]: Throughput logging complete
|
597 |
+
12/09/2024 22:41:33 [INFO|DP=2|PP=0|TP=1|ip-26-0-168-95]: Throughput logging complete
|
598 |
+
12/09/2024 22:41:33 [INFO|DP=0|PP=0|TP=1|ip-26-0-168-95]: Throughput logging complete
|
599 |
+
12/09/2024 22:41:33 [INFO|DP=1|PP=0|TP=0|ip-26-0-168-95]: Throughput logging complete
|
600 |
+
12/09/2024 22:41:33 [INFO|DP=3|PP=0|TP=0|ip-26-0-168-95]: Throughput logging complete
|
601 |
+
12/09/2024 22:41:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-168-95]: Memory usage: 4552.86MiB. Peak allocated 6785.21MiB. Peak reserved: 14790.00MiB
|
602 |
+
slurmstepd: error: *** STEP 13264673.0 ON ip-26-0-168-95 CANCELLED AT 2024-12-09T22:41:33 ***
|
603 |
+
[2024-12-09 22:41:33,975] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
604 |
+
[2024-12-09 22:41:33,975] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2491602 closing signal SIGTERM
|
605 |
+
[2024-12-09 22:41:33,975] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2491603 closing signal SIGTERM
|
606 |
+
[2024-12-09 22:41:33,975] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2491604 closing signal SIGTERM
|
607 |
+
[2024-12-09 22:41:33,975] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2491605 closing signal SIGTERM
|
608 |
+
[2024-12-09 22:41:33,975] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2491606 closing signal SIGTERM
|
609 |
+
[2024-12-09 22:41:33,975] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2491607 closing signal SIGTERM
|
610 |
+
[2024-12-09 22:41:33,976] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2491608 closing signal SIGTERM
|
611 |
+
[2024-12-09 22:41:33,976] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2491609 closing signal SIGTERM
|
logs/13264905-bench_1.14G_dp8_tp2_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab32k.out
ADDED
The diff for this file is too large to render.
See raw diff
|
|
logs/13265355-bench_1.34G_dp2_tp4_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab131k.out
ADDED
@@ -0,0 +1,835 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames ip-26-0-165-38
|
400 |
+
+ export NODELIST=ip-26-0-165-38
|
401 |
+
+ NODELIST=ip-26-0-165-38
|
402 |
+
++ scontrol show hostnames ip-26-0-165-38
|
403 |
+
++ head -n1
|
404 |
+
+ export MASTER_NODE=ip-26-0-165-38
|
405 |
+
+ MASTER_NODE=ip-26-0-165-38
|
406 |
+
+ export MASTER_PORT=12356
|
407 |
+
+ MASTER_PORT=12356
|
408 |
+
+ export NNODES=1
|
409 |
+
+ NNODES=1
|
410 |
+
+ export GPUS_PER_NODE=8
|
411 |
+
+ GPUS_PER_NODE=8
|
412 |
+
+ export WORLD_SIZE=8
|
413 |
+
+ WORLD_SIZE=8
|
414 |
+
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
|
415 |
+
+ CUDA_DEVICE_MAX_CONNECTIONS=1
|
416 |
+
+ export NANOTRON_BENCHMARK=1
|
417 |
+
+ NANOTRON_BENCHMARK=1
|
418 |
+
+ echo 'Master node: ip-26-0-165-38'
|
419 |
+
Master node: ip-26-0-165-38
|
420 |
+
+ echo 'All nodes: ip-26-0-165-38'
|
421 |
+
All nodes: ip-26-0-165-38
|
422 |
+
+ echo 'World size: 8'
|
423 |
+
World size: 8
|
424 |
+
+ srun torchrun --nnodes=1 --nproc_per_node=8 --rdzv_id=13265355 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-165-38:12356 run_train.py --config-file benchmark/configs/config_1.34G_dp2_tp4_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab131k.yaml
|
425 |
+
[2024-12-10 01:01:06,921] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
426 |
+
[2024-12-10 01:01:06,921] torch.distributed.run: [WARNING]
|
427 |
+
[2024-12-10 01:01:06,921] torch.distributed.run: [WARNING] *****************************************
|
428 |
+
[2024-12-10 01:01:06,921] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
429 |
+
[2024-12-10 01:01:06,921] torch.distributed.run: [WARNING] *****************************************
|
430 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Config:
|
431 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Config(general=GeneralArgs(project='debug',
|
432 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: run='1.34G_dp2_tp4_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab131k',
|
433 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: seed=42,
|
434 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: step=None,
|
435 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: consumed_train_samples=None,
|
436 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: benchmark_csv_path=PosixPath('benchmark/results/bench_final.csv'),
|
437 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: ignore_sanity_checks=True),
|
438 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: parallelism=ParallelismArgs(dp=2,
|
439 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pp=1,
|
440 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tp=4,
|
441 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f8b14184e80>,
|
442 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tp_mode=<TensorParallelLinearMode.ALL_REDUCE: 1>,
|
443 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tp_linear_async_communication=False,
|
444 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: recompute_layer=False,
|
445 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tp_recompute_allgather=True,
|
446 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: expert_parallel_size=1),
|
447 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
|
448 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: eos_token_id=0,
|
449 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: hidden_act='silu',
|
450 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: hidden_size=2048,
|
451 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: initializer_range=0.02,
|
452 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: intermediate_size=8192,
|
453 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: is_llama_config=True,
|
454 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: max_position_embeddings=2048,
|
455 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_attention_heads=32,
|
456 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_hidden_layers=16,
|
457 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_key_value_heads=8,
|
458 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pad_token_id=None,
|
459 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pretraining_tp=1,
|
460 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rms_norm_eps=1e-05,
|
461 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_scaling=None,
|
462 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_theta=10000.0,
|
463 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_interleaved=False,
|
464 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tie_word_embeddings=True,
|
465 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: use_cache=True,
|
466 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: vocab_size=131072),
|
467 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: init_method=RandomInit(std=0.02),
|
468 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: dtype=torch.bfloat16,
|
469 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: make_vocab_size_divisible_by=1,
|
470 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: ddp_bucket_cap_mb=25),
|
471 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
|
472 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tokenizer_revision=None,
|
473 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tokenizer_max_length=None),
|
474 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
|
475 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: checkpoint_interval=10000,
|
476 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: save_initial_state=False,
|
477 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: save_final_state=False,
|
478 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: resume_checkpoint_path=None,
|
479 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: checkpoints_path_is_shared_file_system=False),
|
480 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: logging=LoggingArgs(log_level='info',
|
481 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: log_level_replica='info',
|
482 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: iteration_step_info_interval=1),
|
483 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tokens=TokensArgs(sequence_length=2048,
|
484 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: train_steps=100,
|
485 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: micro_batch_size=64,
|
486 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: batch_accumulation_per_replica=16,
|
487 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: val_check_interval=100,
|
488 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: limit_val_batches=0,
|
489 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: limit_test_batches=0),
|
490 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
491 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: adam_beta1=0.9,
|
492 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: adam_beta2=0.95,
|
493 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: torch_adam_is_fused=True,
|
494 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: name='adamW'),
|
495 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: zero_stage=1,
|
496 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: weight_decay=0.01,
|
497 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: clip_grad=1.0,
|
498 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: accumulate_grad_in_fp32=True,
|
499 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
|
500 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lr_warmup_steps=2,
|
501 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lr_warmup_style='linear',
|
502 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lr_decay_style='cosine',
|
503 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lr_decay_steps=13,
|
504 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lr_decay_starting_step=None,
|
505 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: min_decay_lr=1e-05)),
|
506 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
|
507 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: start_training_step=1,
|
508 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: data=DataArgs(dataset=None,
|
509 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: seed=42,
|
510 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_loading_workers=1))],
|
511 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: profiler=None,
|
512 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lighteval=None,
|
513 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: s3_upload=None)
|
514 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Model Config:
|
515 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: LlamaConfig(bos_token_id=0,
|
516 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: eos_token_id=0,
|
517 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: hidden_act='silu',
|
518 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: hidden_size=2048,
|
519 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: initializer_range=0.02,
|
520 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: intermediate_size=8192,
|
521 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: is_llama_config=True,
|
522 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: max_position_embeddings=2048,
|
523 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_attention_heads=32,
|
524 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_hidden_layers=16,
|
525 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_key_value_heads=8,
|
526 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pad_token_id=None,
|
527 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pretraining_tp=1,
|
528 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rms_norm_eps=1e-05,
|
529 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_scaling=None,
|
530 |
+
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_theta=10000.0,
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_interleaved=False,
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tie_word_embeddings=True,
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: use_cache=True,
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: vocab_size=131072)
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Building model..
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Initialize RoPE Theta = 10000.0
12/10/2024 01:01:47 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Setting PP block ranks...
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Total number of parameters: 1.24G (2368.52MiB)
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Local number of parameters: 310M (592.13MiB)
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=3|ip-26-0-165-38]: Local number of parameters: 310M (592.13MiB)
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=1|ip-26-0-165-38]: Local number of parameters: 310M (592.13MiB)
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=2|ip-26-0-165-38]: Local number of parameters: 310M (592.13MiB)
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [After model building] Memory usage: 592.15MiB. Peak allocated: 5632.00MiB Peak reserved: 16898.00MiB
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=3|ip-26-0-165-38]: [After model building] Memory usage: 592.15MiB. Peak allocated: 5632.00MiB Peak reserved: 16898.00MiB
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=1|ip-26-0-165-38]: [After model building] Memory usage: 592.15MiB. Peak allocated: 5632.00MiB Peak reserved: 17922.00MiB
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=2|ip-26-0-165-38]: [After model building] Memory usage: 592.15MiB. Peak allocated: 5632.00MiB Peak reserved: 16898.00MiB
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: No checkpoint path provided.
12/10/2024 01:01:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Parametrizing model parameters using StandardParametrizator
12/10/2024 01:01:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [Optimizer Building] Using LearningRateForSP as learning rate
12/10/2024 01:01:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] Size of optimizer params per rank:
12/10/2024 01:01:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] DP Rank 0 has 155M out of 310M (50.00%) params' optimizer states
12/10/2024 01:01:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] DP Rank 1 has 155M out of 310M (50.00%) params' optimizer states
12/10/2024 01:01:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
12/10/2024 01:01:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Using dummy data generator
12/10/2024 01:01:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [Training Plan] There are 1 training stages
12/10/2024 01:01:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [Stage Stable Training Stage] start from step 1
12/10/2024 01:01:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]:
12/10/2024 01:01:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [Start training] datetime: 2024-12-10 01:01:51.815274 | mbs: 64 | grad_accum: 16 | global_batch_size: 2048 | sequence_length: 2048 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory.
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory
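
A quick sanity check of the run configuration logged above, as a standalone sketch (dp=2 is inferred from the two ZeRO-1 ranks in the sharding report; it is not printed explicitly in this excerpt):

# Recompute the logged batch figures from the parallelism layout (hypothetical helper).
dp, mbs, grad_accum = 2, 64, 16      # dp inferred from "DP Rank 0/1"; mbs/acc from [Start training]
seq_len = 2048

global_batch_size = dp * mbs * grad_accum
assert global_batch_size == 2048      # matches "global_batch_size: 2048"

tokens_per_step = global_batch_size * seq_len
print(f"{tokens_per_step:,} tokens per optimizer step")   # 4,194,304

# ZeRO-1: optimizer states are sharded over the dp group only.
local_params = 310_000_000            # "Local number of parameters: 310M" (per TP rank)
print(f"{local_params // dp:,} params' optimizer states per DP rank")   # 155M, i.e. 50%
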
Traceback (most recent call last):
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
trainer.train(dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
outputs = self.pipeline_engine.train_batch_iter(
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
output = model(**micro_batch)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
Traceback (most recent call last):
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
trainer.train(dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
Traceback (most recent call last):
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
outputs = self.pipeline_engine.train_batch_iter(
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
sharded_logits = self.model(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
trainer.train(dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
output = model(**micro_batch)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
Traceback (most recent call last):
File "/fsx/nouamane/projects/nanotron/run_train.py", line 250, in <module>
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
outputs = self.pipeline_engine.train_batch_iter(
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
trainer.train(dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 456, in train
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 493, in training_step
output = model(**micro_batch)
hidden_encoder_states = encoder_block(**hidden_encoder_states)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
outputs = self.pipeline_engine.train_batch_iter(
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
sharded_logits = self.model(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs)

File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
output = model(**micro_batch)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
output = self.pp_block(**new_kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
sharded_logits = self.model(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
hidden_encoder_states = encoder_block(**hidden_encoder_states)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
sharded_logits = self.model(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
hidden_encoder_states = encoder_block(**hidden_encoder_states)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
output = self.pp_block(**new_kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
merged_states = self.gate_up_proj(hidden_states)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
output = self.pp_block(**new_kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward
hidden_encoder_states = encoder_block(**hidden_encoder_states)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return column_linear(
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 445, in column_linear
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return F.linear(input, weight, bias)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 5 has a total capacty of 79.33 GiB of which 989.94 MiB is free. Including non-PyTorch memory, this process has 78.35 GiB memory in use. Of the allocated memory 67.49 GiB is allocated by PyTorch, and 455.31 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
output = self.pp_block(**new_kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 201, in forward
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return self.act(gate_states) * up_states
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 244, in forward
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
merged_states = self.gate_up_proj(hidden_states)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/nn/activations.py", line 149, in forward
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 89, in forward
return nn.functional.silu(input)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/functional.py", line 2072, in silu
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return column_linear(
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 445, in column_linear
return torch._C._nn.silu(input)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 MiB. GPU 4 has a total capacty of 79.33 GiB of which 13.94 MiB is free. Including non-PyTorch memory, this process has 79.30 GiB memory in use. Of the allocated memory 68.49 GiB is allocated by PyTorch, and 455.31 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
return F.linear(input, weight, bias)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 6 has a total capacty of 79.33 GiB of which 797.94 MiB is free. Including non-PyTorch memory, this process has 78.54 GiB memory in use. Of the allocated memory 67.49 GiB is allocated by PyTorch, and 647.31 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 201, in forward
return self.act(gate_states) * up_states
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 MiB. GPU 7 has a total capacty of 79.33 GiB of which 413.94 MiB is free. Including non-PyTorch memory, this process has 78.91 GiB memory in use. Of the allocated memory 68.99 GiB is allocated by PyTorch, and 455.31 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
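
The failed allocation sizes above are consistent with single micro-batch MLP activations at mbs=64, seq=2048, tp=4. A minimal back-of-the-envelope sketch (intermediate_size=8192 is an assumption for this 1.34G config; it is not printed in this excerpt):

# Rough check of the OOM allocation sizes, under the stated assumptions (bf16 activations).
mbs, seq, tp = 64, 2048, 4
intermediate = 8192          # assumed, not shown in this log excerpt
bytes_per_el = 2             # bf16

tokens = mbs * seq                                               # 131072 tokens per micro-batch
gate_up_out = tokens * (2 * intermediate // tp) * bytes_per_el   # merged gate+up projection output per TP rank
silu_out    = tokens * (intermediate // tp) * bytes_per_el       # SiLU / gate*up output per TP rank

print(gate_up_out / 2**20, "MiB")   # 1024.0 -> matches the 1024.00 MiB allocations
print(silu_out / 2**20, "MiB")      # 512.0  -> matches the 512.00 MiB allocations
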
[2024-12-10 01:01:57,263] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 686871 closing signal SIGTERM
[2024-12-10 01:01:57,263] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 686872 closing signal SIGTERM
[2024-12-10 01:01:57,263] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 686873 closing signal SIGTERM
[2024-12-10 01:01:57,263] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 686874 closing signal SIGTERM
[2024-12-10 01:01:58,293] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 4 (pid: 686875) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
Traceback (most recent call last):
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
return f(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
run(args)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
elastic_launch(
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
return launch_agent(self._config, self._entrypoint, list(args))
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
run_train.py FAILED
------------------------------------------------------------
Failures:
[1]:
time : 2024-12-10_01:01:57
host : ip-26-0-165-38.ec2.internal
rank : 5 (local_rank: 5)
exitcode : 1 (pid: 686876)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[2]:
time : 2024-12-10_01:01:57
host : ip-26-0-165-38.ec2.internal
rank : 6 (local_rank: 6)
exitcode : 1 (pid: 686877)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[3]:
time : 2024-12-10_01:01:57
host : ip-26-0-165-38.ec2.internal
rank : 7 (local_rank: 7)
exitcode : 1 (pid: 686878)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
time : 2024-12-10_01:01:57
host : ip-26-0-165-38.ec2.internal
rank : 4 (local_rank: 4)
exitcode : 1 (pid: 686875)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
srun: error: ip-26-0-165-38: task 0: Exited with exit code 1
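
The OOM messages in this run end with PyTorch's standard hint about max_split_size_mb and PYTORCH_CUDA_ALLOC_CONF. A minimal sketch of how that knob is usually applied (the 512 MiB value is illustrative, not taken from this run; for this benchmark the more direct fix would be a smaller micro-batch or a different dp/tp split):

import os

# The caching allocator reads PYTORCH_CUDA_ALLOC_CONF when it is first initialized,
# so set it before the first CUDA allocation -- in practice, exported in the launch
# script before torchrun starts the workers. The value below is illustrative only.
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:512")

import torch

x = torch.zeros(1, device="cuda")   # first allocation initializes the caching allocator
print(torch.cuda.memory_reserved(), "bytes reserved")
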
logs/13265627-bench_1.34G_dp4_tp4_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff
|
|
logs/13265710-bench_1.34G_dp8_tp4_pp1_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab131k.out
ADDED
@@ -0,0 +1,740 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames ip-26-0-165-38,ip-26-0-166-15,ip-26-0-169-132,ip-26-0-170-143
|
400 |
+
+ export 'NODELIST=ip-26-0-165-38
|
401 |
+
ip-26-0-166-15
|
402 |
+
ip-26-0-169-132
|
403 |
+
ip-26-0-170-143'
|
404 |
+
+ NODELIST='ip-26-0-165-38
|
405 |
+
ip-26-0-166-15
|
406 |
+
ip-26-0-169-132
|
407 |
+
ip-26-0-170-143'
|
408 |
+
++ scontrol show hostnames ip-26-0-165-38,ip-26-0-166-15,ip-26-0-169-132,ip-26-0-170-143
|
409 |
+
++ head -n1
|
410 |
+
+ export MASTER_NODE=ip-26-0-165-38
|
411 |
+
+ MASTER_NODE=ip-26-0-165-38
|
412 |
+
+ export MASTER_PORT=12356
|
413 |
+
+ MASTER_PORT=12356
|
414 |
+
+ export NNODES=4
|
415 |
+
+ NNODES=4
|
416 |
+
+ export GPUS_PER_NODE=8
|
417 |
+
+ GPUS_PER_NODE=8
|
418 |
+
+ export WORLD_SIZE=32
|
419 |
+
+ WORLD_SIZE=32
|
420 |
+
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
|
421 |
+
+ CUDA_DEVICE_MAX_CONNECTIONS=1
|
422 |
+
+ export NANOTRON_BENCHMARK=1
|
423 |
+
+ NANOTRON_BENCHMARK=1
|
424 |
+
+ echo 'Master node: ip-26-0-165-38'
|
425 |
+
Master node: ip-26-0-165-38
|
426 |
+
+ echo 'All nodes: ip-26-0-165-38
|
427 |
+
ip-26-0-166-15
|
428 |
+
ip-26-0-169-132
|
429 |
+
ip-26-0-170-143'
|
430 |
+
All nodes: ip-26-0-165-38
|
431 |
+
ip-26-0-166-15
|
432 |
+
ip-26-0-169-132
|
433 |
+
ip-26-0-170-143
|
434 |
+
+ echo 'World size: 32'
|
435 |
+
World size: 32
|
436 |
+
+ srun torchrun --nnodes=4 --nproc_per_node=8 --rdzv_id=13265710 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-165-38:12356 run_train.py --config-file benchmark/configs/config_1.34G_dp8_tp4_pp1_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml
|
437 |
+
[2024-12-10 05:19:39,069] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
438 |
+
[2024-12-10 05:19:39,069] torch.distributed.run: [WARNING]
|
439 |
+
[2024-12-10 05:19:39,069] torch.distributed.run: [WARNING] *****************************************
|
440 |
+
[2024-12-10 05:19:39,069] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
441 |
+
[2024-12-10 05:19:39,069] torch.distributed.run: [WARNING] *****************************************
|
442 |
+
[2024-12-10 05:19:39,123] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
443 |
+
[2024-12-10 05:19:39,123] torch.distributed.run: [WARNING]
|
444 |
+
[2024-12-10 05:19:39,123] torch.distributed.run: [WARNING] *****************************************
|
445 |
+
[2024-12-10 05:19:39,123] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
446 |
+
[2024-12-10 05:19:39,123] torch.distributed.run: [WARNING] *****************************************
|
447 |
+
[2024-12-10 05:19:39,281] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
448 |
+
[2024-12-10 05:19:39,281] torch.distributed.run: [WARNING]
|
449 |
+
[2024-12-10 05:19:39,281] torch.distributed.run: [WARNING] *****************************************
|
450 |
+
[2024-12-10 05:19:39,281] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
451 |
+
[2024-12-10 05:19:39,281] torch.distributed.run: [WARNING] *****************************************
|
452 |
+
[2024-12-10 05:19:39,435] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
453 |
+
[2024-12-10 05:19:39,435] torch.distributed.run: [WARNING]
|
454 |
+
[2024-12-10 05:19:39,435] torch.distributed.run: [WARNING] *****************************************
|
455 |
+
[2024-12-10 05:19:39,435] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
456 |
+
[2024-12-10 05:19:39,435] torch.distributed.run: [WARNING] *****************************************
|
457 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Config:
|
458 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Config(general=GeneralArgs(project='debug',
|
459 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: run='1.34G_dp8_tp4_pp1_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab131k',
|
460 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: seed=42,
|
461 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: step=None,
|
462 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: consumed_train_samples=None,
|
463 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: benchmark_csv_path=PosixPath('benchmark/results/bench_final.csv'),
|
464 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: ignore_sanity_checks=True),
|
465 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: parallelism=ParallelismArgs(dp=8,
|
466 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pp=1,
|
467 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tp=4,
|
468 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f062c1b0e80>,
|
469 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
|
470 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tp_linear_async_communication=True,
|
471 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: recompute_layer=False,
|
472 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tp_recompute_allgather=True,
|
473 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: expert_parallel_size=1),
|
474 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
|
475 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: eos_token_id=0,
|
476 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: hidden_act='silu',
|
477 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: hidden_size=2048,
|
478 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: initializer_range=0.02,
|
479 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: intermediate_size=8192,
|
480 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: is_llama_config=True,
|
481 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: max_position_embeddings=8192,
|
482 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_attention_heads=32,
|
483 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_hidden_layers=16,
|
484 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_key_value_heads=8,
|
485 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pad_token_id=None,
|
486 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pretraining_tp=1,
|
487 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rms_norm_eps=1e-05,
|
488 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_scaling=None,
|
489 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_theta=10000.0,
|
490 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_interleaved=False,
|
491 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tie_word_embeddings=True,
|
492 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: use_cache=True,
|
493 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: vocab_size=131072),
|
494 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: init_method=RandomInit(std=0.02),
|
495 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: dtype=torch.bfloat16,
|
496 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: make_vocab_size_divisible_by=1,
|
497 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: ddp_bucket_cap_mb=25),
|
498 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
|
499 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tokenizer_revision=None,
|
500 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tokenizer_max_length=None),
|
501 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
|
502 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: checkpoint_interval=10000,
|
503 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: save_initial_state=False,
|
504 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: save_final_state=False,
|
505 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: resume_checkpoint_path=None,
|
506 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: checkpoints_path_is_shared_file_system=False),
|
507 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: logging=LoggingArgs(log_level='info',
|
508 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: log_level_replica='info',
|
509 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: iteration_step_info_interval=1),
|
510 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tokens=TokensArgs(sequence_length=8192,
|
511 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: train_steps=100,
|
512 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: micro_batch_size=1,
|
513 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: batch_accumulation_per_replica=16,
|
514 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: val_check_interval=100,
|
515 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: limit_val_batches=0,
|
516 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: limit_test_batches=0),
|
517 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
518 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: adam_beta1=0.9,
|
519 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: adam_beta2=0.95,
|
520 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: torch_adam_is_fused=True,
|
521 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: name='adamW'),
|
522 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: zero_stage=1,
|
523 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: weight_decay=0.01,
|
524 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: clip_grad=1.0,
|
525 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: accumulate_grad_in_fp32=True,
|
526 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
|
527 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lr_warmup_steps=2,
|
528 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lr_warmup_style='linear',
|
529 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lr_decay_style='cosine',
|
530 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lr_decay_steps=13,
|
531 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lr_decay_starting_step=None,
|
532 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: min_decay_lr=1e-05)),
|
533 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
|
534 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: start_training_step=1,
|
535 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: data=DataArgs(dataset=None,
|
536 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: seed=42,
|
537 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_loading_workers=1))],
|
538 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: profiler=None,
|
539 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: lighteval=None,
|
540 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: s3_upload=None)
|
541 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Model Config:
|
542 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: LlamaConfig(bos_token_id=0,
|
543 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: eos_token_id=0,
|
544 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: hidden_act='silu',
|
545 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: hidden_size=2048,
|
546 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: initializer_range=0.02,
|
547 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: intermediate_size=8192,
|
548 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: is_llama_config=True,
|
549 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: max_position_embeddings=8192,
|
550 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_attention_heads=32,
|
551 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_hidden_layers=16,
|
552 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: num_key_value_heads=8,
|
553 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pad_token_id=None,
|
554 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: pretraining_tp=1,
|
555 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rms_norm_eps=1e-05,
|
556 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_scaling=None,
|
557 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_theta=10000.0,
|
558 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: rope_interleaved=False,
|
559 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: tie_word_embeddings=True,
|
560 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: use_cache=True,
|
561 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: vocab_size=131072)
|
562 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Building model..
|
563 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Initialize RoPE Theta = 10000.0
|
564 |
+
12/10/2024 05:20:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Setting PP block ranks...
|
565 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Total number of parameters: 1.24G (2368.52MiB)
|
566 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=2|ip-26-0-165-38]: Local number of parameters: 310M (592.13MiB)
|
567 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=3|ip-26-0-165-38]: Local number of parameters: 310M (592.13MiB)
|
568 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Local number of parameters: 310M (592.13MiB)
|
569 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=1|ip-26-0-165-38]: Local number of parameters: 310M (592.13MiB)
|
570 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=2|ip-26-0-165-38]: [After model building] Memory usage: 594.15MiB. Peak allocated: 5440.00MiB Peak reserved: 35138.00MiB
|
571 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [After model building] Memory usage: 594.15MiB. Peak allocated: 5440.00MiB Peak reserved: 35138.00MiB
|
572 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=3|ip-26-0-165-38]: [After model building] Memory usage: 594.15MiB. Peak allocated: 5440.00MiB Peak reserved: 35138.00MiB
|
573 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=1|ip-26-0-165-38]: [After model building] Memory usage: 594.15MiB. Peak allocated: 5440.00MiB Peak reserved: 35138.00MiB
|
574 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: No checkpoint path provided.
|
575 |
+
12/10/2024 05:20:26 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Parametrizing model parameters using StandardParametrizator
|
576 |
+
12/10/2024 05:20:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [Optimizer Building] Using LearningRateForSP as learning rate
|
577 |
+
12/10/2024 05:20:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] Size of optimizer params per rank:
|
578 |
+
12/10/2024 05:20:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] DP Rank 0 has 38.8M out of 310M (12.50%) params' optimizer states
|
579 |
+
12/10/2024 05:20:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] DP Rank 1 has 38.8M out of 310M (12.50%) params' optimizer states
|
580 |
+
12/10/2024 05:20:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] DP Rank 2 has 38.8M out of 310M (12.50%) params' optimizer states
|
581 |
+
12/10/2024 05:20:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] DP Rank 3 has 38.8M out of 310M (12.50%) params' optimizer states
|
582 |
+
12/10/2024 05:20:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] DP Rank 4 has 38.8M out of 310M (12.50%) params' optimizer states
|
583 |
+
12/10/2024 05:20:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] DP Rank 5 has 38.8M out of 310M (12.50%) params' optimizer states
|
584 |
+
12/10/2024 05:20:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] DP Rank 6 has 38.8M out of 310M (12.50%) params' optimizer states
|
585 |
+
12/10/2024 05:20:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [ZeRO sharding] DP Rank 7 has 38.8M out of 310M (12.50%) params' optimizer states
|
586 |
+
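A minimal sketch of the per-rank split logged above, assuming ZeRO stage 1 partitions the optimizer states evenly across the dp=8 replicas (numbers copied from the log; the variable names are illustrative, not nanotron's):

# Even ZeRO-1 split of the local parameters' optimizer states over dp=8
# (values taken from the "Local number of parameters" and config lines above).
local_params = 310_446_080   # "Local number of parameters: 310M"
dp = 8                       # data-parallel size from the config
per_rank = local_params / dp
print(f"{per_rank / 1e6:.1f}M params per DP rank ({1 / dp:.2%})")
# -> 38.8M params per DP rank (12.50%), matching the [ZeRO sharding] lines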
12/10/2024 05:20:30 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
|
587 |
+
12/10/2024 05:20:30 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Using dummy data generator
|
588 |
+
12/10/2024 05:20:30 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [Training Plan] There are 1 training stages
|
589 |
+
12/10/2024 05:20:30 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [Stage Stable Training Stage] start from step 1
|
590 |
+
12/10/2024 05:20:30 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]:
|
591 |
+
12/10/2024 05:20:30 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: [Start training] datetime: 2024-12-10 05:20:30.184328 | mbs: 1 | grad_accum: 16 | global_batch_size: 128 | sequence_length: 8192 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
|
592 |
+
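A small sanity-check sketch of the batch accounting in the [Start training] line, using the dp / mbs / grad_accum / sequence_length values from the config dump above (plain arithmetic, not nanotron code):

# Batch/token accounting for this run (values from the config and log above).
dp, mbs, grad_accum, seq_len = 8, 1, 16, 8192
global_batch_size = dp * mbs * grad_accum        # 128, as logged
tokens_per_step = global_batch_size * seq_len    # 1_048_576 ~= 1.05M consumed_tokens
print(global_batch_size, tokens_per_step)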
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory.
|
593 |
+
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory
|
594 |
+
wandb: Tracking run with wandb version 0.16.0
|
595 |
+
wandb: W&B syncing is set to `offline` in this directory.
|
596 |
+
wandb: Run `wandb online` or set WANDB_MODE=online to enable cloud syncing.
|
597 |
+
12/10/2024 05:20:39 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Resuming training from stage Stable Training Stage, it has trained for 0 samples and has 99 remaining train steps
|
598 |
+
12/10/2024 05:20:39 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Memory usage: 1926.44MiB. Peak allocated 5440.00MiB. Peak reserved: 36324.00MiB
|
599 |
+
12/10/2024 05:20:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Memory usage: 2070.59MiB. Peak allocated 7019.23MiB. Peak reserved: 9614.00MiB
|
600 |
+
12/10/2024 05:20:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: iteration: 1 / 100 | consumed_tokens: 1.05M | elapsed_time_per_iteration_ms: 3.35K | tokens_per_sec: 313K | tokens_per_sec_per_gpu: 9.77K | global_batch_size: 128 | lm_loss: 12.2 | lr: 0.00015 | model_tflops_per_gpu: 104 | hardware_tflops_per_gpu: 104 | grad_norm: 0.619 | cuda_memory_allocated: 2.48G | cuda_max_memory_reserved: 10.1G | hd_total_memory_tb: 312G | hd_used_memory_tb: 70G | hd_free_memory_tb: 242G
|
601 |
+
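A hedged reconstruction of the iteration-1 throughput figures from the line above, assuming tokens_per_sec is simply the consumed tokens divided by the elapsed time and spread over the 32 GPUs of this job (rounding may differ slightly from nanotron's logger):

# Iteration-1 throughput, reconstructed from the logged values.
tokens_per_step = 128 * 8192     # global_batch_size * sequence_length
elapsed_s = 3.35                 # elapsed_time_per_iteration_ms: 3.35K
world_size = 32                  # 4 nodes x 8 GPUs
tok_per_sec = tokens_per_step / elapsed_s          # ~313K, as logged
tok_per_sec_per_gpu = tok_per_sec / world_size     # ~9.8K here vs. 9.77K logged
print(f"{tok_per_sec:,.0f} tok/s | {tok_per_sec_per_gpu:,.0f} tok/s/gpu")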
12/10/2024 05:20:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Memory usage: 2366.71MiB. Peak allocated 3624.98MiB. Peak reserved: 9654.00MiB
|
602 |
+
12/10/2024 05:20:43 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Memory usage: 2366.71MiB. Peak allocated 7315.35MiB. Peak reserved: 9654.00MiB
|
603 |
+
12/10/2024 05:20:43 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: iteration: 2 / 100 | consumed_tokens: 2.1M | elapsed_time_per_iteration_ms: 1.3K | tokens_per_sec: 807K | tokens_per_sec_per_gpu: 25.2K | global_batch_size: 128 | lm_loss: 12.2 | lr: 0.0003 | model_tflops_per_gpu: 269 | hardware_tflops_per_gpu: 269 | grad_norm: 0.619 | cuda_memory_allocated: 2.48G | cuda_max_memory_reserved: 10.1G | hd_total_memory_tb: 312G | hd_used_memory_tb: 70G | hd_free_memory_tb: 242G
|
604 |
+
12/10/2024 05:20:43 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Memory usage: 2366.71MiB. Peak allocated 3624.99MiB. Peak reserved: 9654.00MiB
|
605 |
+
12/10/2024 05:20:44 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Memory usage: 2366.71MiB. Peak allocated 7315.35MiB. Peak reserved: 9654.00MiB
|
606 |
+
num_params
{'total': 1241784320, 'local': 310446080}
|
668 |
+
12/10/2024 05:20:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: iteration: 3 / 100 | consumed_tokens: 3.15M | elapsed_time_per_iteration_ms: 1.3K | tokens_per_sec: 804K | tokens_per_sec_per_gpu: 25.1K | global_batch_size: 128 | lm_loss: 12.2 | lr: 0.000296 | model_tflops_per_gpu: 268 | hardware_tflops_per_gpu: 268 | grad_norm: 0.61 | cuda_memory_allocated: 2.48G | cuda_max_memory_reserved: 10.1G | hd_total_memory_tb: 312G | hd_used_memory_tb: 70G | hd_free_memory_tb: 242G
|
669 |
+
num_params
|
670 |
+
{'total': 1241784320, 'local': 310446080}
|
671 |
+
12/10/2024 05:20:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: | job_id | name | nodes | seq_len | mbs | batch_accum | gbs | mTFLOPs | hTFLOPs | tok/s/gpu | AllReduce (GB/s) | AllGather (GB/s) | ReduceScatter (GB/s) | AR Intra-node (GB/s) | AG Intra-node (GB/s) | RS Intra-node (GB/s) | Mem Alloc (GB) | Mem Res (GB) | dp | pp | tp | pp_engine | tp_mode | tp_async_comm | hidden_size | hidden_act | num_layers | num_heads | num_kv_heads | max_pos | vocab_size | tie_word_embeddings | dtype | zero_stage | ddp_bucket_cap_mb | accumulate_grad_in_fp32 | Total Params | Local Params |
|
672 |
+
12/10/2024 05:20:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: | -------- | -------------------------------------------------------------- | ----- | ------- | --- | ----------- | --- | ------- | ------- | --------- | ---------------- | ---------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------- | ------------ | -- | -- | -- | --------------------------------------------------------------------------------------------------------- | --------------------------------------- | ------------- | ----------- | ---------- | ---------- | --------- | ------------ | ------- | ---------- | ------------------- | -------------- | ---------- | ----------------- | ----------------------- | ------------ | ------------ |
|
673 |
+
12/10/2024 05:20:45 [INFO|DP=6|PP=0|TP=0|ip-26-0-170-143]: Throughput logging complete
|
674 |
+
12/10/2024 05:20:45 [INFO|DP=5|PP=0|TP=2|ip-26-0-169-132]: Throughput logging complete
|
675 |
+
12/10/2024 05:20:45 [INFO|DP=5|PP=0|TP=3|ip-26-0-169-132]: Throughput logging complete
|
676 |
+
12/10/2024 05:20:45 [INFO|DP=2|PP=0|TP=2|ip-26-0-166-15]: Throughput logging complete
|
677 |
+
12/10/2024 05:20:45 [INFO|DP=3|PP=0|TP=1|ip-26-0-166-15]: Throughput logging complete
|
678 |
+
12/10/2024 05:20:45 [INFO|DP=3|PP=0|TP=0|ip-26-0-166-15]: Throughput logging complete
|
679 |
+
12/10/2024 05:20:45 [INFO|DP=2|PP=0|TP=0|ip-26-0-166-15]: Throughput logging complete
|
680 |
+
12/10/2024 05:20:45 [INFO|DP=3|PP=0|TP=2|ip-26-0-166-15]: Throughput logging complete
|
681 |
+
12/10/2024 05:20:45 [INFO|DP=5|PP=0|TP=1|ip-26-0-169-132]: Throughput logging complete
|
682 |
+
12/10/2024 05:20:45 [INFO|DP=4|PP=0|TP=2|ip-26-0-169-132]: Throughput logging complete
|
683 |
+
12/10/2024 05:20:45 [INFO|DP=4|PP=0|TP=0|ip-26-0-169-132]: Throughput logging complete
|
684 |
+
12/10/2024 05:20:45 [INFO|DP=5|PP=0|TP=0|ip-26-0-169-132]: Throughput logging complete
|
685 |
+
12/10/2024 05:20:45 [INFO|DP=7|PP=0|TP=2|ip-26-0-170-143]: Throughput logging complete
|
686 |
+
12/10/2024 05:20:45 [INFO|DP=3|PP=0|TP=3|ip-26-0-166-15]: Throughput logging complete
|
687 |
+
12/10/2024 05:20:45 [INFO|DP=2|PP=0|TP=3|ip-26-0-166-15]: Throughput logging complete
|
688 |
+
12/10/2024 05:20:45 [INFO|DP=4|PP=0|TP=3|ip-26-0-169-132]: Throughput logging complete
|
689 |
+
12/10/2024 05:20:45 [INFO|DP=4|PP=0|TP=1|ip-26-0-169-132]: Throughput logging complete
|
690 |
+
12/10/2024 05:20:45 [INFO|DP=6|PP=0|TP=3|ip-26-0-170-143]: Throughput logging complete
|
691 |
+
12/10/2024 05:20:45 [INFO|DP=6|PP=0|TP=2|ip-26-0-170-143]: Throughput logging complete
|
692 |
+
12/10/2024 05:20:45 [INFO|DP=7|PP=0|TP=1|ip-26-0-170-143]: Throughput logging complete
|
693 |
+
12/10/2024 05:20:45 [INFO|DP=7|PP=0|TP=0|ip-26-0-170-143]: Throughput logging complete
|
694 |
+
12/10/2024 05:20:45 [INFO|DP=6|PP=0|TP=1|ip-26-0-170-143]: Throughput logging complete
|
695 |
+
12/10/2024 05:20:45 [INFO|DP=7|PP=0|TP=3|ip-26-0-170-143]: Throughput logging complete
|
696 |
+
12/10/2024 05:20:45 [INFO|DP=2|PP=0|TP=1|ip-26-0-166-15]: Throughput logging complete
|
697 |
+
srun: Job step aborted: Waiting up to 32 seconds for job step to finish.
|
698 |
+
[2024-12-10 05:20:45,117] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
699 |
+
[2024-12-10 05:20:45,117] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
700 |
+
[2024-12-10 05:20:45,117] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 120858 closing signal SIGTERM
|
701 |
+
[2024-12-10 05:20:45,117] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 692495 closing signal SIGTERM
|
702 |
+
slurmstepd: error: *** JOB 13265710 ON ip-26-0-165-38 CANCELLED AT 2024-12-10T05:20:45 ***
|
703 |
+
[2024-12-10 05:20:45,118] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 120859 closing signal SIGTERM
|
704 |
+
[2024-12-10 05:20:45,117] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 692496 closing signal SIGTERM
|
705 |
+
[2024-12-10 05:20:45,117] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 692497 closing signal SIGTERM
|
706 |
+
[2024-12-10 05:20:45,118] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 120860 closing signal SIGTERM
|
707 |
+
[2024-12-10 05:20:45,118] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 120861 closing signal SIGTERM
|
708 |
+
[2024-12-10 05:20:45,118] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 692498 closing signal SIGTERM
|
709 |
+
[2024-12-10 05:20:45,118] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
710 |
+
[2024-12-10 05:20:45,118] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1694073 closing signal SIGTERM
|
711 |
+
[2024-12-10 05:20:45,118] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1694074 closing signal SIGTERM
|
712 |
+
[2024-12-10 05:20:45,118] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1694075 closing signal SIGTERM
|
713 |
+
[2024-12-10 05:20:45,119] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1694076 closing signal SIGTERM
|
714 |
+
[2024-12-10 05:20:45,119] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1694077 closing signal SIGTERM
|
715 |
+
[2024-12-10 05:20:45,118] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 692499 closing signal SIGTERM
|
716 |
+
[2024-12-10 05:20:45,119] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 692500 closing signal SIGTERM
|
717 |
+
[2024-12-10 05:20:45,120] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 692501 closing signal SIGTERM
|
718 |
+
[2024-12-10 05:20:45,120] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 692502 closing signal SIGTERM
|
719 |
+
12/10/2024 05:20:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: | 13265710 | 1.34G_dp8_tp4_pp1_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab131k | 4 | 8192 | 1 | 16 | 128 | 268.17 | 268.17 | 25131.92 | 233.00 | 208.83 | 207.26 | 460.15 | 264.97 | 264.29 | 3.54 | 9.43 | 8 | 1 | 4 | <nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f062c1b0e80> | TensorParallelLinearMode.REDUCE_SCATTER | True | 2048 | silu | 16 | 32 | 8 | 8192 | 131072 | True | torch.bfloat16 | 1 | 25 | True | 1.24G | 310M |
|
720 |
+
12/10/2024 05:20:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-38]: Throughput logging complete
|
721 |
+
12/10/2024 05:20:45 [INFO|DP=0|PP=0|TP=1|ip-26-0-165-38]: Throughput logging complete
|
722 |
+
12/10/2024 05:20:45 [INFO|DP=0|PP=0|TP=2|ip-26-0-165-38]: Throughput logging complete
|
723 |
+
12/10/2024 05:20:45 [INFO|DP=0|PP=0|TP=3|ip-26-0-165-38]: Throughput logging complete
|
724 |
+
12/10/2024 05:20:45 [INFO|DP=1|PP=0|TP=1|ip-26-0-165-38]: Throughput logging complete
|
725 |
+
12/10/2024 05:20:45 [INFO|DP=1|PP=0|TP=3|ip-26-0-165-38]: Throughput logging complete
|
726 |
+
12/10/2024 05:20:45 [INFO|DP=1|PP=0|TP=2|ip-26-0-165-38]: Throughput logging complete
|
727 |
+
12/10/2024 05:20:45 [INFO|DP=1|PP=0|TP=0|ip-26-0-165-38]: Throughput logging complete
|
728 |
+
slurmstepd: error: *** STEP 13265710.0 ON ip-26-0-165-38 CANCELLED AT 2024-12-10T05:20:45 ***
|
729 |
+
[2024-12-10 05:20:45,119] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
730 |
+
[2024-12-10 05:20:45,120] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 788507 closing signal SIGTERM
|
731 |
+
[2024-12-10 05:20:45,120] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 788508 closing signal SIGTERM
|
732 |
+
[2024-12-10 05:20:45,120] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 788509 closing signal SIGTERM
|
733 |
+
[2024-12-10 05:20:45,120] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 788510 closing signal SIGTERM
|
734 |
+
[2024-12-10 05:20:45,122] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1694078 closing signal SIGTERM
|
735 |
+
[2024-12-10 05:20:45,120] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 120862 closing signal SIGTERM
|
736 |
+
[2024-12-10 05:20:45,122] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1694079 closing signal SIGTERM
|
737 |
+
[2024-12-10 05:20:45,120] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 120863 closing signal SIGTERM
|
738 |
+
[2024-12-10 05:20:45,122] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1694080 closing signal SIGTERM
|
739 |
+
[2024-12-10 05:20:45,121] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 120864 closing signal SIGTERM
|
740 |
+
[2024-12-10 05:20:45,121] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 120865 closing signal SIGTERM
|
logs/13265791-bench_1.34G_dp16_tp4_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k.out
ADDED
@@ -0,0 +1,917 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames 'ip-26-0-161-[153,178],ip-26-0-166-[214,244],ip-26-0-168-[95,120],ip-26-0-171-[88,102]'
|
400 |
+
+ export 'NODELIST=ip-26-0-161-153
|
401 |
+
ip-26-0-161-178
|
402 |
+
ip-26-0-166-214
|
403 |
+
ip-26-0-166-244
|
404 |
+
ip-26-0-168-95
|
405 |
+
ip-26-0-168-120
|
406 |
+
ip-26-0-171-88
|
407 |
+
ip-26-0-171-102'
|
408 |
+
+ NODELIST='ip-26-0-161-153
|
409 |
+
ip-26-0-161-178
|
410 |
+
ip-26-0-166-214
|
411 |
+
ip-26-0-166-244
|
412 |
+
ip-26-0-168-95
|
413 |
+
ip-26-0-168-120
|
414 |
+
ip-26-0-171-88
|
415 |
+
ip-26-0-171-102'
|
416 |
+
++ head -n1
|
417 |
+
++ scontrol show hostnames 'ip-26-0-161-[153,178],ip-26-0-166-[214,244],ip-26-0-168-[95,120],ip-26-0-171-[88,102]'
|
418 |
+
+ export MASTER_NODE=ip-26-0-161-153
|
419 |
+
+ MASTER_NODE=ip-26-0-161-153
|
420 |
+
+ export MASTER_PORT=12356
|
421 |
+
+ MASTER_PORT=12356
|
422 |
+
+ export NNODES=8
|
423 |
+
+ NNODES=8
|
424 |
+
+ export GPUS_PER_NODE=8
|
425 |
+
+ GPUS_PER_NODE=8
|
426 |
+
+ export WORLD_SIZE=64
|
427 |
+
+ WORLD_SIZE=64
|
428 |
+
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
|
429 |
+
+ CUDA_DEVICE_MAX_CONNECTIONS=1
|
430 |
+
+ export NANOTRON_BENCHMARK=1
|
431 |
+
+ NANOTRON_BENCHMARK=1
|
432 |
+
+ echo 'Master node: ip-26-0-161-153'
|
433 |
+
Master node: ip-26-0-161-153
|
434 |
+
+ echo 'All nodes: ip-26-0-161-153
|
435 |
+
ip-26-0-161-178
|
436 |
+
ip-26-0-166-214
|
437 |
+
ip-26-0-166-244
|
438 |
+
ip-26-0-168-95
|
439 |
+
ip-26-0-168-120
|
440 |
+
ip-26-0-171-88
|
441 |
+
ip-26-0-171-102'
|
442 |
+
All nodes: ip-26-0-161-153
|
443 |
+
ip-26-0-161-178
|
444 |
+
ip-26-0-166-214
|
445 |
+
ip-26-0-166-244
|
446 |
+
ip-26-0-168-95
|
447 |
+
ip-26-0-168-120
|
448 |
+
ip-26-0-171-88
|
449 |
+
ip-26-0-171-102
|
450 |
+
+ echo 'World size: 64'
|
451 |
+
World size: 64
|
452 |
+
+ srun torchrun --nnodes=8 --nproc_per_node=8 --rdzv_id=13265791 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-161-153:12356 run_train.py --config-file benchmark/configs/config_1.34G_dp16_tp4_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml
|
453 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
454 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
455 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
456 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING]
|
457 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] *****************************************
|
458 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
459 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] *****************************************
|
460 |
+
[2024-12-10 06:25:26,041] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
461 |
+
[2024-12-10 06:25:26,041] torch.distributed.run: [WARNING]
|
462 |
+
[2024-12-10 06:25:26,041] torch.distributed.run: [WARNING] *****************************************
|
463 |
+
[2024-12-10 06:25:26,041] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
464 |
+
[2024-12-10 06:25:26,041] torch.distributed.run: [WARNING] *****************************************
|
465 |
+
[2024-12-10 06:25:26,055] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
466 |
+
[2024-12-10 06:25:26,074] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
467 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING]
|
468 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] *****************************************
|
469 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
470 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] *****************************************
|
471 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING]
|
472 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] *****************************************
|
473 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
474 |
+
[2024-12-10 06:25:26,040] torch.distributed.run: [WARNING] *****************************************
|
475 |
+
[2024-12-10 06:25:26,055] torch.distributed.run: [WARNING]
|
476 |
+
[2024-12-10 06:25:26,055] torch.distributed.run: [WARNING] *****************************************
|
477 |
+
[2024-12-10 06:25:26,055] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
478 |
+
[2024-12-10 06:25:26,055] torch.distributed.run: [WARNING] *****************************************
|
479 |
+
[2024-12-10 06:25:26,114] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
480 |
+
[2024-12-10 06:25:26,074] torch.distributed.run: [WARNING]
|
481 |
+
[2024-12-10 06:25:26,074] torch.distributed.run: [WARNING] *****************************************
|
482 |
+
[2024-12-10 06:25:26,074] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
483 |
+
[2024-12-10 06:25:26,074] torch.distributed.run: [WARNING] *****************************************
|
484 |
+
[2024-12-10 06:25:26,114] torch.distributed.run: [WARNING]
|
485 |
+
[2024-12-10 06:25:26,114] torch.distributed.run: [WARNING] *****************************************
|
486 |
+
[2024-12-10 06:25:26,114] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
487 |
+
[2024-12-10 06:25:26,114] torch.distributed.run: [WARNING] *****************************************
|
488 |
+
[2024-12-10 06:25:26,181] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
489 |
+
[2024-12-10 06:25:26,181] torch.distributed.run: [WARNING]
|
490 |
+
[2024-12-10 06:25:26,181] torch.distributed.run: [WARNING] *****************************************
|
491 |
+
[2024-12-10 06:25:26,181] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
492 |
+
[2024-12-10 06:25:26,181] torch.distributed.run: [WARNING] *****************************************
|
493 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Config:
|
494 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Config(general=GeneralArgs(project='debug',
|
495 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: run='1.34G_dp16_tp4_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k',
|
496 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: seed=42,
|
497 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: step=None,
|
498 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: consumed_train_samples=None,
|
499 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: benchmark_csv_path=PosixPath('benchmark/results/bench_final.csv'),
|
500 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: ignore_sanity_checks=True),
|
501 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: parallelism=ParallelismArgs(dp=16,
|
502 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: pp=1,
|
503 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: tp=4,
|
504 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7fa156884e20>,
|
505 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: tp_mode=<TensorParallelLinearMode.ALL_REDUCE: 1>,
|
506 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: tp_linear_async_communication=False,
|
507 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: recompute_layer=False,
|
508 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: tp_recompute_allgather=True,
|
509 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: expert_parallel_size=1),
|
510 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
|
511 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: eos_token_id=0,
|
512 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: hidden_act='silu',
|
513 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: hidden_size=2048,
|
514 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: initializer_range=0.02,
|
515 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: intermediate_size=8192,
|
516 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: is_llama_config=True,
|
517 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: max_position_embeddings=2048,
|
518 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: num_attention_heads=32,
|
519 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: num_hidden_layers=16,
|
520 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: num_key_value_heads=8,
|
521 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: pad_token_id=None,
|
522 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: pretraining_tp=1,
|
523 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: rms_norm_eps=1e-05,
|
524 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: rope_scaling=None,
|
525 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: rope_theta=10000.0,
|
526 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: rope_interleaved=False,
|
527 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: tie_word_embeddings=True,
|
528 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: use_cache=True,
|
529 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: vocab_size=131072),
|
530 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: init_method=RandomInit(std=0.02),
|
531 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: dtype=torch.bfloat16,
|
532 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: make_vocab_size_divisible_by=1,
|
533 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: ddp_bucket_cap_mb=25),
|
534 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
|
535 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: tokenizer_revision=None,
|
536 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: tokenizer_max_length=None),
|
537 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
|
538 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: checkpoint_interval=10000,
|
539 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: save_initial_state=False,
|
540 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: save_final_state=False,
|
541 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: resume_checkpoint_path=None,
|
542 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: checkpoints_path_is_shared_file_system=False),
|
543 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: logging=LoggingArgs(log_level='info',
|
544 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: log_level_replica='info',
|
545 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: iteration_step_info_interval=1),
|
546 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: tokens=TokensArgs(sequence_length=2048,
|
547 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: train_steps=100,
|
548 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: micro_batch_size=1,
|
549 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: batch_accumulation_per_replica=32,
|
550 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: val_check_interval=100,
|
551 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: limit_val_batches=0,
|
552 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: limit_test_batches=0),
|
553 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
554 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: adam_beta1=0.9,
|
555 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: adam_beta2=0.95,
|
556 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: torch_adam_is_fused=True,
|
557 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: name='adamW'),
|
558 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: zero_stage=1,
|
559 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: weight_decay=0.01,
|
560 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: clip_grad=1.0,
|
561 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: accumulate_grad_in_fp32=True,
|
562 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
|
563 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: lr_warmup_steps=2,
|
564 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: lr_warmup_style='linear',
|
565 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: lr_decay_style='cosine',
|
566 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: lr_decay_steps=13,
|
567 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: lr_decay_starting_step=None,
|
568 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: min_decay_lr=1e-05)),
|
569 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
|
570 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: start_training_step=1,
|
571 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: data=DataArgs(dataset=None,
|
572 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: seed=42,
|
573 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: num_loading_workers=1))],
|
574 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: profiler=None,
|
575 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: lighteval=None,
|
576 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: s3_upload=None)
|
577 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Model Config:
|
578 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: LlamaConfig(bos_token_id=0,
|
579 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: eos_token_id=0,
|
580 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: hidden_act='silu',
|
581 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: hidden_size=2048,
|
582 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: initializer_range=0.02,
|
583 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: intermediate_size=8192,
|
584 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: is_llama_config=True,
|
585 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: max_position_embeddings=2048,
|
586 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: num_attention_heads=32,
|
587 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: num_hidden_layers=16,
|
588 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: num_key_value_heads=8,
|
589 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: pad_token_id=None,
|
590 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: pretraining_tp=1,
|
591 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: rms_norm_eps=1e-05,
|
592 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: rope_scaling=None,
|
593 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: rope_theta=10000.0,
|
594 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: rope_interleaved=False,
|
595 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: tie_word_embeddings=True,
|
596 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: use_cache=True,
|
597 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: vocab_size=131072)
|
598 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Building model..
|
599 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Initialize RoPE Theta = 10000.0
|
600 |
+
12/10/2024 06:26:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Setting PP block ranks...
|
601 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Total number of parameters: 1.24G (2368.52MiB)
|
602 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Local number of parameters: 310M (592.13MiB)
|
603 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=2|ip-26-0-161-153]: Local number of parameters: 310M (592.13MiB)
|
604 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=3|ip-26-0-161-153]: Local number of parameters: 310M (592.13MiB)
|
605 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=1|ip-26-0-161-153]: Local number of parameters: 310M (592.13MiB)
|
606 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=2|ip-26-0-161-153]: [After model building] Memory usage: 592.15MiB. Peak allocated: 5408.00MiB Peak reserved: 26914.00MiB
|
607 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [After model building] Memory usage: 592.15MiB. Peak allocated: 5408.00MiB Peak reserved: 26914.00MiB
|
608 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=3|ip-26-0-161-153]: [After model building] Memory usage: 592.15MiB. Peak allocated: 5408.00MiB Peak reserved: 27938.00MiB
|
609 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=1|ip-26-0-161-153]: [After model building] Memory usage: 592.15MiB. Peak allocated: 5408.00MiB Peak reserved: 26914.00MiB
|
610 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: No checkpoint path provided.
|
611 |
+
12/10/2024 06:26:15 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Parametrizing model parameters using StandardParametrizator
|
612 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [Optimizer Building] Using LearningRateForSP as learning rate
|
613 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] Size of optimizer params per rank:
|
614 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 0 has 19.4M out of 310M (6.25%) params' optimizer states
|
615 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 1 has 19.4M out of 310M (6.25%) params' optimizer states
|
616 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 2 has 19.4M out of 310M (6.25%) params' optimizer states
|
617 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 3 has 19.4M out of 310M (6.25%) params' optimizer states
|
618 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 4 has 19.4M out of 310M (6.25%) params' optimizer states
|
619 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 5 has 19.4M out of 310M (6.25%) params' optimizer states
|
620 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 6 has 19.4M out of 310M (6.25%) params' optimizer states
|
621 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 7 has 19.4M out of 310M (6.25%) params' optimizer states
|
622 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 8 has 19.4M out of 310M (6.25%) params' optimizer states
|
623 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 9 has 19.4M out of 310M (6.25%) params' optimizer states
|
624 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 10 has 19.4M out of 310M (6.25%) params' optimizer states
|
625 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 11 has 19.4M out of 310M (6.25%) params' optimizer states
|
626 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 12 has 19.4M out of 310M (6.25%) params' optimizer states
|
627 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 13 has 19.4M out of 310M (6.25%) params' optimizer states
|
628 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 14 has 19.4M out of 310M (6.25%) params' optimizer states
|
629 |
+
12/10/2024 06:26:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [ZeRO sharding] DP Rank 15 has 19.4M out of 310M (6.25%) params' optimizer states
|
630 |
+
12/10/2024 06:26:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
|
631 |
+
12/10/2024 06:26:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Using dummy data generator
|
632 |
+
12/10/2024 06:26:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [Training Plan] There are 1 training stages
|
633 |
+
12/10/2024 06:26:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [Stage Stable Training Stage] start from step 1
|
634 |
+
12/10/2024 06:26:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]:
|
635 |
+
12/10/2024 06:26:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: [Start training] datetime: 2024-12-10 06:26:19.407113 | mbs: 1 | grad_accum: 32 | global_batch_size: 512 | sequence_length: 2048 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
|
636 |
+
wandb: WARNING Path /fsx/nouamane/.cache/wandb/wandb/ wasn't writable, using system temp directory.
|
638 |
+
wandb: Tracking run with wandb version 0.16.0
|
639 |
+
wandb: W&B syncing is set to `offline` in this directory.
|
640 |
+
wandb: Run `wandb online` or set WANDB_MODE=online to enable cloud syncing.
|
641 |
+
12/10/2024 06:26:27 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Resuming training from stage Stable Training Stage, it has trained for 0 samples and has 99 remaining train steps
|
642 |
+
12/10/2024 06:26:27 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Memory usage: 1850.43MiB. Peak allocated 5408.00MiB. Peak reserved: 28100.00MiB
|
643 |
+
12/10/2024 06:26:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Memory usage: 1918.48MiB. Peak allocated 3553.85MiB. Peak reserved: 3990.00MiB
|
644 |
+
12/10/2024 06:26:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: iteration: 1 / 100 | consumed_tokens: 1.05M | elapsed_time_per_iteration_ms: 3.77K | tokens_per_sec: 278K | tokens_per_sec_per_gpu: 4.34K | global_batch_size: 512 | lm_loss: 12.2 | lr: 0.00015 | model_tflops_per_gpu: 35.9 | hardware_tflops_per_gpu: 35.9 | grad_norm: 0.651 | cuda_memory_allocated: 2.17G | cuda_max_memory_reserved: 4.22G | hd_total_memory_tb: 312G | hd_used_memory_tb: 70.4G | hd_free_memory_tb: 242G
|
645 |
+
12/10/2024 06:26:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Memory usage: 2066.56MiB. Peak allocated 3287.83MiB. Peak reserved: 4026.00MiB
|
646 |
+
12/10/2024 06:26:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Memory usage: 2066.57MiB. Peak allocated 3700.94MiB. Peak reserved: 4044.00MiB
|
647 |
+
12/10/2024 06:26:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: iteration: 2 / 100 | consumed_tokens: 2.1M | elapsed_time_per_iteration_ms: 1.95K | tokens_per_sec: 539K | tokens_per_sec_per_gpu: 8.42K | global_batch_size: 512 | lm_loss: 12.2 | lr: 0.0003 | model_tflops_per_gpu: 69.5 | hardware_tflops_per_gpu: 69.5 | grad_norm: 0.651 | cuda_memory_allocated: 2.17G | cuda_max_memory_reserved: 4.24G | hd_total_memory_tb: 312G | hd_used_memory_tb: 70.4G | hd_free_memory_tb: 242G
|
648 |
+
12/10/2024 06:26:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Memory usage: 2066.56MiB. Peak allocated 3287.85MiB. Peak reserved: 4044.00MiB
|
649 |
+
12/10/2024 06:26:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Memory usage: 2066.57MiB. Peak allocated 3699.95MiB. Peak reserved: 4044.00MiB
num_params
{'total': 1241784320, 'local': 310446080}
12/10/2024 06:26:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: iteration: 3 / 100 | consumed_tokens: 3.15M | elapsed_time_per_iteration_ms: 1.92K | tokens_per_sec: 546K | tokens_per_sec_per_gpu: 8.53K | global_batch_size: 512 | lm_loss: 12.2 | lr: 0.000296 | model_tflops_per_gpu: 70.4 | hardware_tflops_per_gpu: 70.4 | grad_norm: 0.64 | cuda_memory_allocated: 2.17G | cuda_max_memory_reserved: 4.24G | hd_total_memory_tb: 312G | hd_used_memory_tb: 70.4G | hd_free_memory_tb: 242G
|
777 |
+
num_params
|
778 |
+
{'total': 1241784320, 'local': 310446080}
|
779 |
+
12/10/2024 06:26:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: | job_id | name | nodes | seq_len | mbs | batch_accum | gbs | mTFLOPs | hTFLOPs | tok/s/gpu | AllReduce (GB/s) | AllGather (GB/s) | ReduceScatter (GB/s) | AR Intra-node (GB/s) | AG Intra-node (GB/s) | RS Intra-node (GB/s) | Mem Alloc (GB) | Mem Res (GB) | dp | pp | tp | pp_engine | tp_mode | tp_async_comm | hidden_size | hidden_act | num_layers | num_heads | num_kv_heads | max_pos | vocab_size | tie_word_embeddings | dtype | zero_stage | ddp_bucket_cap_mb | accumulate_grad_in_fp32 | Total Params | Local Params |
|
780 |
+
12/10/2024 06:26:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: | -------- | --------------------------------------------------------------- | ----- | ------- | --- | ----------- | --- | ------- | ------- | --------- | ---------------- | ---------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------- | ------------ | -- | -- | -- | --------------------------------------------------------------------------------------------------------- | ----------------------------------- | ------------- | ----------- | ---------- | ---------- | --------- | ------------ | ------- | ---------- | ------------------- | -------------- | ---------- | ----------------- | ----------------------- | ------------ | ------------ |
|
781 |
+
12/10/2024 06:26:35 [INFO|DP=8|PP=0|TP=2|ip-26-0-168-120]: Throughput logging complete
|
782 |
+
12/10/2024 06:26:35 [INFO|DP=6|PP=0|TP=1|ip-26-0-166-244]: Throughput logging complete
|
783 |
+
12/10/2024 06:26:35 [INFO|DP=14|PP=0|TP=0|ip-26-0-171-88]: Throughput logging complete
|
784 |
+
12/10/2024 06:26:35 [INFO|DP=10|PP=0|TP=0|ip-26-0-168-95]: Throughput logging complete
|
785 |
+
12/10/2024 06:26:35 [INFO|DP=11|PP=0|TP=0|ip-26-0-168-95]: Throughput logging complete
|
786 |
+
12/10/2024 06:26:35 [INFO|DP=12|PP=0|TP=1|ip-26-0-171-102]: Throughput logging complete
|
787 |
+
12/10/2024 06:26:35 [INFO|DP=12|PP=0|TP=0|ip-26-0-171-102]: Throughput logging complete
|
788 |
+
12/10/2024 06:26:35 [INFO|DP=2|PP=0|TP=1|ip-26-0-161-178]: Throughput logging complete
|
789 |
+
12/10/2024 06:26:35 [INFO|DP=8|PP=0|TP=1|ip-26-0-168-120]: Throughput logging complete
|
790 |
+
12/10/2024 06:26:35 [INFO|DP=7|PP=0|TP=2|ip-26-0-166-244]: Throughput logging complete
|
791 |
+
12/10/2024 06:26:35 [INFO|DP=14|PP=0|TP=2|ip-26-0-171-88]: Throughput logging complete
|
792 |
+
12/10/2024 06:26:35 [INFO|DP=9|PP=0|TP=0|ip-26-0-168-120]: Throughput logging complete
|
793 |
+
12/10/2024 06:26:35 [INFO|DP=9|PP=0|TP=2|ip-26-0-168-120]: Throughput logging complete
|
794 |
+
12/10/2024 06:26:35 [INFO|DP=8|PP=0|TP=0|ip-26-0-168-120]: Throughput logging complete
|
795 |
+
12/10/2024 06:26:35 [INFO|DP=9|PP=0|TP=3|ip-26-0-168-120]: Throughput logging complete
|
796 |
+
12/10/2024 06:26:35 [INFO|DP=8|PP=0|TP=3|ip-26-0-168-120]: Throughput logging complete
|
797 |
+
12/10/2024 06:26:35 [INFO|DP=13|PP=0|TP=1|ip-26-0-171-102]: Throughput logging complete
|
798 |
+
12/10/2024 06:26:35 [INFO|DP=4|PP=0|TP=2|ip-26-0-166-214]: Throughput logging complete
|
799 |
+
12/10/2024 06:26:35 [INFO|DP=4|PP=0|TP=1|ip-26-0-166-214]: Throughput logging complete
|
800 |
+
12/10/2024 06:26:35 [INFO|DP=4|PP=0|TP=3|ip-26-0-166-214]: Throughput logging complete
|
801 |
+
12/10/2024 06:26:35 [INFO|DP=5|PP=0|TP=3|ip-26-0-166-214]: Throughput logging complete
|
802 |
+
12/10/2024 06:26:35 [INFO|DP=3|PP=0|TP=0|ip-26-0-161-178]: Throughput logging complete
|
803 |
+
12/10/2024 06:26:35 [INFO|DP=13|PP=0|TP=2|ip-26-0-171-102]: Throughput logging complete
|
804 |
+
12/10/2024 06:26:35 [INFO|DP=12|PP=0|TP=3|ip-26-0-171-102]: Throughput logging complete
|
805 |
+
12/10/2024 06:26:35 [INFO|DP=6|PP=0|TP=3|ip-26-0-166-244]: Throughput logging complete
|
806 |
+
12/10/2024 06:26:35 [INFO|DP=7|PP=0|TP=0|ip-26-0-166-244]: Throughput logging complete
|
807 |
+
12/10/2024 06:26:35 [INFO|DP=7|PP=0|TP=3|ip-26-0-166-244]: Throughput logging complete
|
808 |
+
12/10/2024 06:26:35 [INFO|DP=2|PP=0|TP=3|ip-26-0-161-178]: Throughput logging complete
|
809 |
+
12/10/2024 06:26:35 [INFO|DP=9|PP=0|TP=1|ip-26-0-168-120]: Throughput logging complete
|
810 |
+
12/10/2024 06:26:35 [INFO|DP=10|PP=0|TP=1|ip-26-0-168-95]: Throughput logging complete
|
811 |
+
12/10/2024 06:26:35 [INFO|DP=13|PP=0|TP=0|ip-26-0-171-102]: Throughput logging complete
|
812 |
+
12/10/2024 06:26:35 [INFO|DP=14|PP=0|TP=1|ip-26-0-171-88]: Throughput logging complete
|
813 |
+
12/10/2024 06:26:35 [INFO|DP=15|PP=0|TP=2|ip-26-0-171-88]: Throughput logging complete
|
814 |
+
12/10/2024 06:26:35 [INFO|DP=15|PP=0|TP=1|ip-26-0-171-88]: Throughput logging complete
|
815 |
+
12/10/2024 06:26:35 [INFO|DP=2|PP=0|TP=2|ip-26-0-161-178]: Throughput logging complete
|
816 |
+
12/10/2024 06:26:35 [INFO|DP=13|PP=0|TP=3|ip-26-0-171-102]: Throughput logging complete
|
817 |
+
12/10/2024 06:26:35 [INFO|DP=5|PP=0|TP=1|ip-26-0-166-214]: Throughput logging complete
|
818 |
+
12/10/2024 06:26:35 [INFO|DP=5|PP=0|TP=0|ip-26-0-166-214]: Throughput logging complete
|
819 |
+
12/10/2024 06:26:35 [INFO|DP=5|PP=0|TP=2|ip-26-0-166-214]: Throughput logging complete
|
820 |
+
12/10/2024 06:26:35 [INFO|DP=15|PP=0|TP=0|ip-26-0-171-88]: Throughput logging complete
|
821 |
+
12/10/2024 06:26:35 [INFO|DP=3|PP=0|TP=1|ip-26-0-161-178]: Throughput logging complete
|
822 |
+
12/10/2024 06:26:35 [INFO|DP=11|PP=0|TP=2|ip-26-0-168-95]: Throughput logging complete
|
823 |
+
12/10/2024 06:26:35 [INFO|DP=11|PP=0|TP=1|ip-26-0-168-95]: Throughput logging complete
|
824 |
+
12/10/2024 06:26:35 [INFO|DP=10|PP=0|TP=2|ip-26-0-168-95]: Throughput logging complete
|
825 |
+
12/10/2024 06:26:35 [INFO|DP=11|PP=0|TP=3|ip-26-0-168-95]: Throughput logging complete
|
826 |
+
12/10/2024 06:26:35 [INFO|DP=10|PP=0|TP=3|ip-26-0-168-95]: Throughput logging complete
|
827 |
+
12/10/2024 06:26:35 [INFO|DP=4|PP=0|TP=0|ip-26-0-166-214]: Throughput logging complete
|
828 |
+
12/10/2024 06:26:35 [INFO|DP=15|PP=0|TP=3|ip-26-0-171-88]: Throughput logging complete
|
829 |
+
12/10/2024 06:26:35 [INFO|DP=14|PP=0|TP=3|ip-26-0-171-88]: Throughput logging complete
|
830 |
+
12/10/2024 06:26:35 [INFO|DP=2|PP=0|TP=0|ip-26-0-161-178]: Throughput logging complete
|
831 |
+
12/10/2024 06:26:35 [INFO|DP=3|PP=0|TP=2|ip-26-0-161-178]: Throughput logging complete
|
832 |
+
12/10/2024 06:26:35 [INFO|DP=3|PP=0|TP=3|ip-26-0-161-178]: Throughput logging complete
|
833 |
+
12/10/2024 06:26:35 [INFO|DP=6|PP=0|TP=0|ip-26-0-166-244]: Throughput logging complete
|
834 |
+
12/10/2024 06:26:35 [INFO|DP=12|PP=0|TP=2|ip-26-0-171-102]: Throughput logging complete
|
835 |
+
12/10/2024 06:26:35 [INFO|DP=6|PP=0|TP=2|ip-26-0-166-244]: Throughput logging complete
|
836 |
+
12/10/2024 06:26:35 [INFO|DP=7|PP=0|TP=1|ip-26-0-166-244]: Throughput logging complete
|
837 |
+
12/10/2024 06:26:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: | 13265791 | 1.34G_dp16_tp4_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k | 8 | 2048 | 1 | 32 | 512 | 70.38 | 70.38 | 8526.75 | 223.78 | 167.60 | 166.93 | 460.77 | 265.16 | 264.99 | 3.21 | 3.95 | 16 | 1 | 4 | <nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7fa156884e20> | TensorParallelLinearMode.ALL_REDUCE | False | 2048 | silu | 16 | 32 | 8 | 2048 | 131072 | True | torch.bfloat16 | 1 | 25 | True | 1.24G | 310M |
|
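(Editor's note: the mTFLOPs column in the summary row above can be roughly reproduced from the other columns. A sketch assuming the common 6·N + 12·L·h·s FLOPs-per-token estimate for a decoder-only model; this may not be exactly the formula nanotron applies, but it lands within rounding of the logged value.)

# Hypothetical re-derivation of the reported ~70.4 model TFLOPs/GPU (illustration only).
N = 1_241_784_320              # "Total Params" column
L, h, s = 16, 2048, 2048       # num_layers, hidden_size, seq_len columns
tok_per_sec_per_gpu = 8526.75  # "tok/s/gpu" column
flops_per_token = 6 * N + 12 * L * h * s            # weight FLOPs + attention-matrix FLOPs, fwd + bwd
tflops_per_gpu = flops_per_token * tok_per_sec_per_gpu / 1e12
print(f"{tflops_per_gpu:.1f} TFLOPs/GPU")            # ~70.4, matching the logged 70.38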
838 |
+
12/10/2024 06:26:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-153]: Throughput logging complete
|
839 |
+
12/10/2024 06:26:35 [INFO|DP=1|PP=0|TP=0|ip-26-0-161-153]: Throughput logging complete
|
840 |
+
12/10/2024 06:26:35 [INFO|DP=0|PP=0|TP=2|ip-26-0-161-153]: Throughput logging complete
|
841 |
+
12/10/2024 06:26:35 [INFO|DP=0|PP=0|TP=3|ip-26-0-161-153]: Throughput logging complete
|
842 |
+
12/10/2024 06:26:35 [INFO|DP=1|PP=0|TP=3|ip-26-0-161-153]: Throughput logging complete
|
843 |
+
12/10/2024 06:26:35 [INFO|DP=1|PP=0|TP=1|ip-26-0-161-153]: Throughput logging complete
|
844 |
+
12/10/2024 06:26:35 [INFO|DP=1|PP=0|TP=2|ip-26-0-161-153]: Throughput logging complete
|
845 |
+
12/10/2024 06:26:35 [INFO|DP=0|PP=0|TP=1|ip-26-0-161-153]: Throughput logging complete
|
846 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
847 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 510726 closing signal SIGTERM
|
848 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 510727 closing signal SIGTERM
|
849 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
850 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
851 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 510728 closing signal SIGTERM
|
852 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 510729 closing signal SIGTERM
|
853 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1345832 closing signal SIGTERM
|
854 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 27294 closing signal SIGTERM
|
855 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 27295 closing signal SIGTERM
|
856 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1345833 closing signal SIGTERM
|
857 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 27296 closing signal SIGTERM
|
858 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 27297 closing signal SIGTERM
|
859 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1345834 closing signal SIGTERM
|
860 |
+
srun: Job step aborted: Waiting up to 32 seconds for job step to finish.
|
861 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1345835 closing signal SIGTERM
|
862 |
+
[2024-12-10 06:26:35,947] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
863 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 179668 closing signal SIGTERM
|
864 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 179669 closing signal SIGTERM
|
865 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
866 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 179670 closing signal SIGTERM
|
867 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 3963246 closing signal SIGTERM
|
868 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
869 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 3963247 closing signal SIGTERM
|
870 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 35833 closing signal SIGTERM
|
871 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 3963248 closing signal SIGTERM
|
872 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1345836 closing signal SIGTERM
|
873 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 35834 closing signal SIGTERM
|
874 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 3963249 closing signal SIGTERM
|
875 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 35835 closing signal SIGTERM
|
876 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 35836 closing signal SIGTERM
|
877 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 35837 closing signal SIGTERM
|
878 |
+
slurmstepd: error: *** JOB 13265791 ON ip-26-0-161-153 CANCELLED AT 2024-12-10T06:26:35 ***
|
879 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 35838 closing signal SIGTERM
|
880 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 510730 closing signal SIGTERM
|
881 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 27298 closing signal SIGTERM
|
882 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 510731 closing signal SIGTERM
|
883 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 27299 closing signal SIGTERM
|
884 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 510732 closing signal SIGTERM
|
885 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 27300 closing signal SIGTERM
|
886 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
887 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 510733 closing signal SIGTERM
|
888 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2650799 closing signal SIGTERM
|
889 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 179671 closing signal SIGTERM
|
890 |
+
[2024-12-10 06:26:35,948] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 179672 closing signal SIGTERM
|
891 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2650800 closing signal SIGTERM
|
892 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2650801 closing signal SIGTERM
|
893 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2650802 closing signal SIGTERM
|
894 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 2650803 closing signal SIGTERM
|
895 |
+
slurmstepd: error: *** STEP 13265791.0 ON ip-26-0-161-153 CANCELLED AT 2024-12-10T06:26:35 ***
|
896 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1345837 closing signal SIGTERM
|
897 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 27301 closing signal SIGTERM
|
898 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1345838 closing signal SIGTERM
|
899 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 3963250 closing signal SIGTERM
|
900 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 3963251 closing signal SIGTERM
|
901 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 3963252 closing signal SIGTERM
|
902 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 3963253 closing signal SIGTERM
|
903 |
+
[2024-12-10 06:26:35,949] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
904 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 84995 closing signal SIGTERM
|
905 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 84996 closing signal SIGTERM
|
906 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 84997 closing signal SIGTERM
|
907 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 84998 closing signal SIGTERM
|
908 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 84999 closing signal SIGTERM
|
909 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 179673 closing signal SIGTERM
|
910 |
+
[2024-12-10 06:26:35,951] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 1345839 closing signal SIGTERM
|
911 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 85000 closing signal SIGTERM
|
912 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 179674 closing signal SIGTERM
|
913 |
+
[2024-12-10 06:26:35,951] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 85001 closing signal SIGTERM
|
914 |
+
[2024-12-10 06:26:35,951] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 85002 closing signal SIGTERM
|
915 |
+
[2024-12-10 06:26:35,950] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 179675 closing signal SIGTERM
|
916 |
+
[2024-12-10 06:26:35,952] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 35839 closing signal SIGTERM
|
917 |
+
[2024-12-10 06:26:35,952] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 35840 closing signal SIGTERM
|
logs/13265922-bench_1.34G_dp32_tp4_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff
|
|
logs/13266540-bench_1.14G_dp2_tp8_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab32k.out
ADDED
The diff for this file is too large to render.
See raw diff
|
|
logs/13385099-bench_1.14G_dp2_tp8_pp1_acc16_mbs64_seq2048_zero1_tpmodeALL_vocab32k.out
ADDED
@@ -0,0 +1,524 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames ip-26-0-160-103
|
400 |
+
+ export NODELIST=ip-26-0-160-103
|
401 |
+
+ NODELIST=ip-26-0-160-103
|
402 |
+
++ scontrol show hostnames ip-26-0-160-103
|
403 |
+
++ head -n1
|
404 |
+
+ export MASTER_NODE=ip-26-0-160-103
|
405 |
+
+ MASTER_NODE=ip-26-0-160-103
|
406 |
+
+ export MASTER_PORT=12356
|
407 |
+
+ MASTER_PORT=12356
|
408 |
+
+ export NNODES=1
|
409 |
+
+ NNODES=1
|
410 |
+
+ export GPUS_PER_NODE=8
|
411 |
+
+ GPUS_PER_NODE=8
|
412 |
+
+ export WORLD_SIZE=8
|
413 |
+
+ WORLD_SIZE=8
|
414 |
+
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
|
415 |
+
+ CUDA_DEVICE_MAX_CONNECTIONS=1
|
416 |
+
+ export NANOTRON_BENCHMARK=1
|
417 |
+
+ NANOTRON_BENCHMARK=1
|
418 |
+
+ echo 'Master node: ip-26-0-160-103'
|
419 |
+
Master node: ip-26-0-160-103
|
420 |
+
+ echo 'All nodes: ip-26-0-160-103'
|
421 |
+
All nodes: ip-26-0-160-103
|
422 |
+
+ echo 'World size: 8'
|
423 |
+
World size: 8
|
424 |
+
+ srun torchrun --nnodes=1 --nproc_per_node=8 --rdzv_id=13385099 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-160-103:12356 run_train.py
|
425 |
+
[2024-12-12 16:03:12,708] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
426 |
+
[2024-12-12 16:03:12,709] torch.distributed.run: [WARNING]
|
427 |
+
[2024-12-12 16:03:12,709] torch.distributed.run: [WARNING] *****************************************
|
428 |
+
[2024-12-12 16:03:12,709] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
429 |
+
[2024-12-12 16:03:12,709] torch.distributed.run: [WARNING] *****************************************
|
430 |
+
usage: run_train.py [-h] --config-file CONFIG_FILE
|
431 |
+
usage: run_train.py [-h] --config-file CONFIG_FILE
|
432 |
+
run_train.py: error: the following arguments are required: --config-file
|
433 |
+
run_train.py: error: the following arguments are required: --config-file
|
434 |
+
usage: run_train.py [-h] --config-file CONFIG_FILE
|
435 |
+
usage: run_train.py [-h] --config-file CONFIG_FILE
|
436 |
+
run_train.py: error: the following arguments are required: --config-file
|
437 |
+
run_train.py: error: the following arguments are required: --config-file
|
438 |
+
usage: run_train.py [-h] --config-file CONFIG_FILE
|
439 |
+
usage: run_train.py [-h] --config-file CONFIG_FILE
|
440 |
+
usage: run_train.py [-h] --config-file CONFIG_FILE
|
441 |
+
usage: run_train.py [-h] --config-file CONFIG_FILE
|
442 |
+
run_train.py: error: the following arguments are required: --config-file
|
443 |
+
run_train.py: error: the following arguments are required: --config-file
|
444 |
+
run_train.py: error: the following arguments are required: --config-file
|
445 |
+
run_train.py: error: the following arguments are required: --config-file
|
446 |
+
[2024-12-12 16:03:23,056] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 2) local_rank: 0 (pid: 1841725) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
|
447 |
+
Traceback (most recent call last):
|
448 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
449 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
450 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
451 |
+
return f(*args, **kwargs)
|
452 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
453 |
+
run(args)
|
454 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
455 |
+
elastic_launch(
|
456 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
457 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
458 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
|
459 |
+
raise ChildFailedError(
|
460 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
461 |
+
============================================================
|
462 |
+
run_train.py FAILED
|
463 |
+
------------------------------------------------------------
|
464 |
+
Failures:
|
465 |
+
[1]:
|
466 |
+
time : 2024-12-12_16:03:23
|
467 |
+
host : ip-26-0-160-103.ec2.internal
|
468 |
+
rank : 1 (local_rank: 1)
|
469 |
+
exitcode : 2 (pid: 1841726)
|
470 |
+
error_file: <N/A>
|
471 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
472 |
+
[2]:
|
473 |
+
time : 2024-12-12_16:03:23
|
474 |
+
host : ip-26-0-160-103.ec2.internal
|
475 |
+
rank : 2 (local_rank: 2)
|
476 |
+
exitcode : 2 (pid: 1841727)
|
477 |
+
error_file: <N/A>
|
478 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
479 |
+
[3]:
|
480 |
+
time : 2024-12-12_16:03:23
|
481 |
+
host : ip-26-0-160-103.ec2.internal
|
482 |
+
rank : 3 (local_rank: 3)
|
483 |
+
exitcode : 2 (pid: 1841728)
|
484 |
+
error_file: <N/A>
|
485 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
486 |
+
[4]:
|
487 |
+
time : 2024-12-12_16:03:23
|
488 |
+
host : ip-26-0-160-103.ec2.internal
|
489 |
+
rank : 4 (local_rank: 4)
|
490 |
+
exitcode : 2 (pid: 1841729)
|
491 |
+
error_file: <N/A>
|
492 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
493 |
+
[5]:
|
494 |
+
time : 2024-12-12_16:03:23
|
495 |
+
host : ip-26-0-160-103.ec2.internal
|
496 |
+
rank : 5 (local_rank: 5)
|
497 |
+
exitcode : 2 (pid: 1841730)
|
498 |
+
error_file: <N/A>
|
499 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
500 |
+
[6]:
|
501 |
+
time : 2024-12-12_16:03:23
|
502 |
+
host : ip-26-0-160-103.ec2.internal
|
503 |
+
rank : 6 (local_rank: 6)
|
504 |
+
exitcode : 2 (pid: 1841731)
|
505 |
+
error_file: <N/A>
|
506 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
507 |
+
[7]:
|
508 |
+
time : 2024-12-12_16:03:23
|
509 |
+
host : ip-26-0-160-103.ec2.internal
|
510 |
+
rank : 7 (local_rank: 7)
|
511 |
+
exitcode : 2 (pid: 1841732)
|
512 |
+
error_file: <N/A>
|
513 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
514 |
+
------------------------------------------------------------
|
515 |
+
Root Cause (first observed failure):
|
516 |
+
[0]:
|
517 |
+
time : 2024-12-12_16:03:23
|
518 |
+
host : ip-26-0-160-103.ec2.internal
|
519 |
+
rank : 0 (local_rank: 0)
|
520 |
+
exitcode : 2 (pid: 1841725)
|
521 |
+
error_file: <N/A>
|
522 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
523 |
+
============================================================
|
524 |
+
srun: error: ip-26-0-160-103: task 0: Exited with exit code 1
|
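Editor's note (not part of the original log): job 13385099 above fails because torchrun starts run_train.py without the required --config-file argument, so all eight ranks print the usage message and exit with code 2. A minimal corrected launch, keeping the rendezvous settings recorded in this log, might look like the sketch below; the config path is an assumption borrowed from the stress-test jobs later in this diff, not something this log contains.

# Sketch only, assuming a valid benchmark config exists under benchmark/configs/.
srun torchrun --nnodes=1 --nproc_per_node=8 \
    --rdzv_id=$SLURM_JOB_ID --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py --config-file benchmark/configs/<some_config>.yaml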
logs/13401577-bench_stress_test.out
ADDED
@@ -0,0 +1,518 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
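Editor's note (not part of the original log): the module load cuda/12.1 trace above is what populates the CUDA/EFA/NCCL environment for the job (CUDA_HOME, NCCL_HOME, FI_PROVIDER=efa, NCCL_SOCKET_IFNAME=enp, and friends). As a hedged sketch rather than anything the script runs, the same variables can be inspected on a compute node with:

# Sketch only: list the CUDA/EFA/NCCL variables exported by the modulefile.
env | grep -E '^(CUDA_HOME|NCCL_|FI_PROVIDER|FI_EFA_)'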
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames ip-26-0-169-207
|
400 |
+
+ export NODELIST=ip-26-0-169-207
|
401 |
+
+ NODELIST=ip-26-0-169-207
|
402 |
+
++ scontrol show hostnames ip-26-0-169-207
|
403 |
+
++ head -n1
|
404 |
+
+ export MASTER_NODE=ip-26-0-169-207
|
405 |
+
+ MASTER_NODE=ip-26-0-169-207
|
406 |
+
+ export MASTER_PORT=12356
|
407 |
+
+ MASTER_PORT=12356
|
408 |
+
+ export NNODES=1
|
409 |
+
+ NNODES=1
|
410 |
+
+ export GPUS_PER_NODE=8
|
411 |
+
+ GPUS_PER_NODE=8
|
412 |
+
+ export WORLD_SIZE=8
|
413 |
+
+ WORLD_SIZE=8
|
414 |
+
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
|
415 |
+
+ CUDA_DEVICE_MAX_CONNECTIONS=1
|
416 |
+
+ export NCCL_DEBUG=INFO
|
417 |
+
+ NCCL_DEBUG=INFO
|
418 |
+
+ export NANOTRON_BENCHMARK=1
|
419 |
+
+ NANOTRON_BENCHMARK=1
|
420 |
+
+ echo 'Master node: ip-26-0-169-207'
|
421 |
+
Master node: ip-26-0-169-207
|
422 |
+
+ echo 'All nodes: ip-26-0-169-207'
|
423 |
+
All nodes: ip-26-0-169-207
|
424 |
+
+ echo 'World size: 8'
|
425 |
+
World size: 8
|
426 |
+
+ srun torchrun --nnodes=1 --nproc_per_node=8 --rdzv_id=13401577 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-169-207:12356 run_train.py --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml
|
427 |
+
[2024-12-12 23:59:56,415] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
428 |
+
[2024-12-12 23:59:56,415] torch.distributed.run: [WARNING]
|
429 |
+
[2024-12-12 23:59:56,415] torch.distributed.run: [WARNING] *****************************************
|
430 |
+
[2024-12-12 23:59:56,415] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
431 |
+
[2024-12-12 23:59:56,415] torch.distributed.run: [WARNING] *****************************************
|
432 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
433 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
434 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
435 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
436 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
437 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
438 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
439 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
440 |
+
[2024-12-13 00:00:01,726] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 2) local_rank: 0 (pid: 222779) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
|
441 |
+
Traceback (most recent call last):
|
442 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
443 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
444 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
445 |
+
return f(*args, **kwargs)
|
446 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
447 |
+
run(args)
|
448 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
449 |
+
elastic_launch(
|
450 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
451 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
452 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
|
453 |
+
raise ChildFailedError(
|
454 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
455 |
+
============================================================
|
456 |
+
run_train.py FAILED
|
457 |
+
------------------------------------------------------------
|
458 |
+
Failures:
|
459 |
+
[1]:
|
460 |
+
time : 2024-12-13_00:00:01
|
461 |
+
host : ip-26-0-169-207.ec2.internal
|
462 |
+
rank : 1 (local_rank: 1)
|
463 |
+
exitcode : 2 (pid: 222780)
|
464 |
+
error_file: <N/A>
|
465 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
466 |
+
[2]:
|
467 |
+
time : 2024-12-13_00:00:01
|
468 |
+
host : ip-26-0-169-207.ec2.internal
|
469 |
+
rank : 2 (local_rank: 2)
|
470 |
+
exitcode : 2 (pid: 222781)
|
471 |
+
error_file: <N/A>
|
472 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
473 |
+
[3]:
|
474 |
+
time : 2024-12-13_00:00:01
|
475 |
+
host : ip-26-0-169-207.ec2.internal
|
476 |
+
rank : 3 (local_rank: 3)
|
477 |
+
exitcode : 2 (pid: 222782)
|
478 |
+
error_file: <N/A>
|
479 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
480 |
+
[4]:
|
481 |
+
time : 2024-12-13_00:00:01
|
482 |
+
host : ip-26-0-169-207.ec2.internal
|
483 |
+
rank : 4 (local_rank: 4)
|
484 |
+
exitcode : 2 (pid: 222783)
|
485 |
+
error_file: <N/A>
|
486 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
487 |
+
[5]:
|
488 |
+
time : 2024-12-13_00:00:01
|
489 |
+
host : ip-26-0-169-207.ec2.internal
|
490 |
+
rank : 5 (local_rank: 5)
|
491 |
+
exitcode : 2 (pid: 222784)
|
492 |
+
error_file: <N/A>
|
493 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
494 |
+
[6]:
|
495 |
+
time : 2024-12-13_00:00:01
|
496 |
+
host : ip-26-0-169-207.ec2.internal
|
497 |
+
rank : 6 (local_rank: 6)
|
498 |
+
exitcode : 2 (pid: 222785)
|
499 |
+
error_file: <N/A>
|
500 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
501 |
+
[7]:
|
502 |
+
time : 2024-12-13_00:00:01
|
503 |
+
host : ip-26-0-169-207.ec2.internal
|
504 |
+
rank : 7 (local_rank: 7)
|
505 |
+
exitcode : 2 (pid: 222786)
|
506 |
+
error_file: <N/A>
|
507 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
508 |
+
------------------------------------------------------------
|
509 |
+
Root Cause (first observed failure):
|
510 |
+
[0]:
|
511 |
+
time : 2024-12-13_00:00:01
|
512 |
+
host : ip-26-0-169-207.ec2.internal
|
513 |
+
rank : 0 (local_rank: 0)
|
514 |
+
exitcode : 2 (pid: 222779)
|
515 |
+
error_file: <N/A>
|
516 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
517 |
+
============================================================
|
518 |
+
srun: error: ip-26-0-169-207: task 0: Exited with exit code 1
|
logs/13401875-bench_stress_test.out
ADDED
@@ -0,0 +1,518 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames ip-26-0-160-103
|
400 |
+
+ export NODELIST=ip-26-0-160-103
|
401 |
+
+ NODELIST=ip-26-0-160-103
|
402 |
+
++ scontrol show hostnames ip-26-0-160-103
|
403 |
+
++ head -n1
|
404 |
+
+ export MASTER_NODE=ip-26-0-160-103
|
405 |
+
+ MASTER_NODE=ip-26-0-160-103
|
406 |
+
+ export MASTER_PORT=12356
|
407 |
+
+ MASTER_PORT=12356
|
408 |
+
+ export NNODES=1
|
409 |
+
+ NNODES=1
|
410 |
+
+ export GPUS_PER_NODE=8
|
411 |
+
+ GPUS_PER_NODE=8
|
412 |
+
+ export WORLD_SIZE=8
|
413 |
+
+ WORLD_SIZE=8
|
414 |
+
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
|
415 |
+
+ CUDA_DEVICE_MAX_CONNECTIONS=1
|
416 |
+
+ export NCCL_DEBUG=INFO
|
417 |
+
+ NCCL_DEBUG=INFO
|
418 |
+
+ export NANOTRON_BENCHMARK=1
|
419 |
+
+ NANOTRON_BENCHMARK=1
|
420 |
+
+ echo 'Master node: ip-26-0-160-103'
|
421 |
+
Master node: ip-26-0-160-103
|
422 |
+
+ echo 'All nodes: ip-26-0-160-103'
|
423 |
+
All nodes: ip-26-0-160-103
|
424 |
+
+ echo 'World size: 8'
|
425 |
+
World size: 8
|
426 |
+
+ srun torchrun --nnodes=1 --nproc_per_node=8 --rdzv_id=13401875 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-160-103:12356 run_train.py --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml
|
427 |
+
[2024-12-13 00:50:02,240] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
428 |
+
[2024-12-13 00:50:02,240] torch.distributed.run: [WARNING]
|
429 |
+
[2024-12-13 00:50:02,240] torch.distributed.run: [WARNING] *****************************************
|
430 |
+
[2024-12-13 00:50:02,240] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
431 |
+
[2024-12-13 00:50:02,240] torch.distributed.run: [WARNING] *****************************************
|
432 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
433 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
434 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
435 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
436 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
437 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
438 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
439 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
440 |
+
[2024-12-13 00:50:07,537] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 2) local_rank: 0 (pid: 2160924) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
|
441 |
+
Traceback (most recent call last):
|
442 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
443 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
444 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
445 |
+
return f(*args, **kwargs)
|
446 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
447 |
+
run(args)
|
448 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
449 |
+
elastic_launch(
|
450 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
451 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
452 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
|
453 |
+
raise ChildFailedError(
|
454 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
455 |
+
============================================================
|
456 |
+
run_train.py FAILED
|
457 |
+
------------------------------------------------------------
|
458 |
+
Failures:
|
459 |
+
[1]:
|
460 |
+
time : 2024-12-13_00:50:07
|
461 |
+
host : ip-26-0-160-103.ec2.internal
|
462 |
+
rank : 1 (local_rank: 1)
|
463 |
+
exitcode : 2 (pid: 2160925)
|
464 |
+
error_file: <N/A>
|
465 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
466 |
+
[2]:
|
467 |
+
time : 2024-12-13_00:50:07
|
468 |
+
host : ip-26-0-160-103.ec2.internal
|
469 |
+
rank : 2 (local_rank: 2)
|
470 |
+
exitcode : 2 (pid: 2160926)
|
471 |
+
error_file: <N/A>
|
472 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
473 |
+
[3]:
|
474 |
+
time : 2024-12-13_00:50:07
|
475 |
+
host : ip-26-0-160-103.ec2.internal
|
476 |
+
rank : 3 (local_rank: 3)
|
477 |
+
exitcode : 2 (pid: 2160927)
|
478 |
+
error_file: <N/A>
|
479 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
480 |
+
[4]:
|
481 |
+
time : 2024-12-13_00:50:07
|
482 |
+
host : ip-26-0-160-103.ec2.internal
|
483 |
+
rank : 4 (local_rank: 4)
|
484 |
+
exitcode : 2 (pid: 2160928)
|
485 |
+
error_file: <N/A>
|
486 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
487 |
+
[5]:
|
488 |
+
time : 2024-12-13_00:50:07
|
489 |
+
host : ip-26-0-160-103.ec2.internal
|
490 |
+
rank : 5 (local_rank: 5)
|
491 |
+
exitcode : 2 (pid: 2160929)
|
492 |
+
error_file: <N/A>
|
493 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
494 |
+
[6]:
|
495 |
+
time : 2024-12-13_00:50:07
|
496 |
+
host : ip-26-0-160-103.ec2.internal
|
497 |
+
rank : 6 (local_rank: 6)
|
498 |
+
exitcode : 2 (pid: 2160930)
|
499 |
+
error_file: <N/A>
|
500 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
501 |
+
[7]:
|
502 |
+
time : 2024-12-13_00:50:07
|
503 |
+
host : ip-26-0-160-103.ec2.internal
|
504 |
+
rank : 7 (local_rank: 7)
|
505 |
+
exitcode : 2 (pid: 2160931)
|
506 |
+
error_file: <N/A>
|
507 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
508 |
+
------------------------------------------------------------
|
509 |
+
Root Cause (first observed failure):
|
510 |
+
[0]:
|
511 |
+
time : 2024-12-13_00:50:07
|
512 |
+
host : ip-26-0-160-103.ec2.internal
|
513 |
+
rank : 0 (local_rank: 0)
|
514 |
+
exitcode : 2 (pid: 2160924)
|
515 |
+
error_file: <N/A>
|
516 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
517 |
+
============================================================
|
518 |
+
srun: error: ip-26-0-160-103: task 0: Exited with exit code 1
|
logs/13401984-bench_stress_test.out
ADDED
@@ -0,0 +1,518 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames ip-26-0-169-207
|
400 |
+
+ export NODELIST=ip-26-0-169-207
|
401 |
+
+ NODELIST=ip-26-0-169-207
|
402 |
+
++ scontrol show hostnames ip-26-0-169-207
|
403 |
+
++ head -n1
|
404 |
+
+ export MASTER_NODE=ip-26-0-169-207
|
405 |
+
+ MASTER_NODE=ip-26-0-169-207
|
406 |
+
+ export MASTER_PORT=12356
|
407 |
+
+ MASTER_PORT=12356
|
408 |
+
+ export NNODES=1
|
409 |
+
+ NNODES=1
|
410 |
+
+ export GPUS_PER_NODE=8
|
411 |
+
+ GPUS_PER_NODE=8
|
412 |
+
+ export WORLD_SIZE=8
|
413 |
+
+ WORLD_SIZE=8
|
414 |
+
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
|
415 |
+
+ CUDA_DEVICE_MAX_CONNECTIONS=1
|
416 |
+
+ export NCCL_DEBUG=INFO
|
417 |
+
+ NCCL_DEBUG=INFO
|
418 |
+
+ export NANOTRON_BENCHMARK=1
|
419 |
+
+ NANOTRON_BENCHMARK=1
|
420 |
+
+ echo 'Master node: ip-26-0-169-207'
|
421 |
+
Master node: ip-26-0-169-207
|
422 |
+
+ echo 'All nodes: ip-26-0-169-207'
|
423 |
+
All nodes: ip-26-0-169-207
|
424 |
+
+ echo 'World size: 8'
|
425 |
+
World size: 8
|
426 |
+
+ srun torchrun --nnodes=1 --nproc_per_node=8 --rdzv_id=13401984 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-169-207:12356 run_train.py --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml
|
427 |
+
[2024-12-13 01:07:51,468] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
|
428 |
+
[2024-12-13 01:07:51,468] torch.distributed.run: [WARNING]
|
429 |
+
[2024-12-13 01:07:51,468] torch.distributed.run: [WARNING] *****************************************
|
430 |
+
[2024-12-13 01:07:51,468] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
431 |
+
[2024-12-13 01:07:51,468] torch.distributed.run: [WARNING] *****************************************
|
432 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
433 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
434 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
435 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
436 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
437 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
438 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
439 |
+
/fsx/nouamane/miniconda/envs/2-1-cu121/bin/python: can't open file '/fsx/nouamane/projects/run_train.py': [Errno 2] No such file or directory
|
440 |
+
[2024-12-13 01:07:56,557] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 2) local_rank: 0 (pid: 287331) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
|
441 |
+
Traceback (most recent call last):
|
442 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
443 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
444 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
445 |
+
return f(*args, **kwargs)
|
446 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
447 |
+
run(args)
|
448 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
449 |
+
elastic_launch(
|
450 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
451 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
452 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
|
453 |
+
raise ChildFailedError(
|
454 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
455 |
+
============================================================
|
456 |
+
run_train.py FAILED
|
457 |
+
------------------------------------------------------------
|
458 |
+
Failures:
|
459 |
+
[1]:
|
460 |
+
time : 2024-12-13_01:07:56
|
461 |
+
host : ip-26-0-169-207.ec2.internal
|
462 |
+
rank : 1 (local_rank: 1)
|
463 |
+
exitcode : 2 (pid: 287332)
|
464 |
+
error_file: <N/A>
|
465 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
466 |
+
[2]:
|
467 |
+
time : 2024-12-13_01:07:56
|
468 |
+
host : ip-26-0-169-207.ec2.internal
|
469 |
+
rank : 2 (local_rank: 2)
|
470 |
+
exitcode : 2 (pid: 287333)
|
471 |
+
error_file: <N/A>
|
472 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
473 |
+
[3]:
|
474 |
+
time : 2024-12-13_01:07:56
|
475 |
+
host : ip-26-0-169-207.ec2.internal
|
476 |
+
rank : 3 (local_rank: 3)
|
477 |
+
exitcode : 2 (pid: 287334)
|
478 |
+
error_file: <N/A>
|
479 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
480 |
+
[4]:
|
481 |
+
time : 2024-12-13_01:07:56
|
482 |
+
host : ip-26-0-169-207.ec2.internal
|
483 |
+
rank : 4 (local_rank: 4)
|
484 |
+
exitcode : 2 (pid: 287335)
|
485 |
+
error_file: <N/A>
|
486 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
487 |
+
[5]:
|
488 |
+
time : 2024-12-13_01:07:56
|
489 |
+
host : ip-26-0-169-207.ec2.internal
|
490 |
+
rank : 5 (local_rank: 5)
|
491 |
+
exitcode : 2 (pid: 287336)
|
492 |
+
error_file: <N/A>
|
493 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
494 |
+
[6]:
|
495 |
+
time : 2024-12-13_01:07:56
|
496 |
+
host : ip-26-0-169-207.ec2.internal
|
497 |
+
rank : 6 (local_rank: 6)
|
498 |
+
exitcode : 2 (pid: 287337)
|
499 |
+
error_file: <N/A>
|
500 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
501 |
+
[7]:
|
502 |
+
time : 2024-12-13_01:07:56
|
503 |
+
host : ip-26-0-169-207.ec2.internal
|
504 |
+
rank : 7 (local_rank: 7)
|
505 |
+
exitcode : 2 (pid: 287338)
|
506 |
+
error_file: <N/A>
|
507 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
508 |
+
------------------------------------------------------------
|
509 |
+
Root Cause (first observed failure):
|
510 |
+
[0]:
|
511 |
+
time : 2024-12-13_01:07:56
|
512 |
+
host : ip-26-0-169-207.ec2.internal
|
513 |
+
rank : 0 (local_rank: 0)
|
514 |
+
exitcode : 2 (pid: 287331)
|
515 |
+
error_file: <N/A>
|
516 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
517 |
+
============================================================
|
518 |
+
srun: error: ip-26-0-169-207: task 0: Exited with exit code 1
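Note on the failure above: this is a path problem rather than a training error. torchrun spawns the eight workers, but every rank exits with code 2 because run_train.py cannot be found under /fsx/nouamane/projects, so the elastic agent reports ChildFailedError and srun propagates exit code 1. A minimal sketch of the launch this log records is given below; the cd into a nanotron checkout and the use of $SLURM_JOB_ID for the rendezvous id are assumptions (the log hard-codes the job id 13401984 and does not show the working directory), while the node, port, world size and config file are taken verbatim from the trace.

    # Minimal sketch, assuming the repo lives at /fsx/nouamane/projects/nanotron (not confirmed by this log)
    cd /fsx/nouamane/projects/nanotron
    export MASTER_NODE=ip-26-0-169-207   # from `scontrol show hostnames | head -n1` in the trace
    export MASTER_PORT=12356
    export GPUS_PER_NODE=8
    srun torchrun --nnodes=1 --nproc_per_node=$GPUS_PER_NODE \
        --rdzv_id=$SLURM_JOB_ID --rdzv_backend=c10d --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
        run_train.py --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml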
|
logs/13417428-bench_stress_test.out  ADDED  (diff too large to render; see raw diff)
logs/13417834-bench_stress_test.out  ADDED  (diff too large to render; see raw diff)
logs/13417884-bench_stress_test.out  ADDED  (diff too large to render; see raw diff)
logs/13418012-bench_stress_test.out  ADDED  (diff too large to render; see raw diff)
logs/13438545-bench_3.57G_dp64_tp1_pp1_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out  ADDED  (diff too large to render; see raw diff)
logs/13438869-bench_3.57G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.out  ADDED  (diff too large to render; see raw diff)
logs/13442395-bench_3.57G_dp1_tp4_pp16_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.out  ADDED  (diff too large to render; see raw diff)
logs/13443093-bench_80G_dp1_tp4_pp2_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.out  ADDED
@@ -0,0 +1,805 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
+ _mlstatus=0
+ '[' -n x ']'
+ IFS='
'
+ unset _mlre _mlv _mlrv _mlIFS
+ '[' -n '' ']'
+ unset _mlshdbg
+ return 0
+ source /fsx/nouamane/miniconda/bin/activate
++ _CONDA_ROOT=/fsx/nouamane/miniconda
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
+++ export _CE_M=
+++ _CE_M=
+++ export _CE_CONDA=
+++ _CE_CONDA=
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
+++ '[' -z x ']'
++ conda activate
++ local cmd=activate
++ case "$cmd" in
++ __conda_activate activate
++ '[' -n '' ']'
++ local ask_conda
+++ PS1=
+++ __conda_exe shell.posix activate
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
++ export CONDA_SHLVL=4
++ CONDA_SHLVL=4
++ export CONDA_DEFAULT_ENV=2-1-cu121
++ CONDA_DEFAULT_ENV=2-1-cu121
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
++ export _CE_M=
++ _CE_M=
++ export _CE_CONDA=
++ _CE_CONDA=
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
+++ test -n ''
+++ xml_catalog_files_libxml2=
+++ XML_CATALOG_FILES=
+++ conda_catalog_files=
+++ ifs_libxml2='
'
+++ IFS=' '
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
+++ for pre in ${rem}
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
+++ rem=
+++ IFS='
'
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
+++ unset conda_catalog_files ifs_libxml2 rem
+ __conda_hashr
+ '[' -n '' ']'
+ '[' -n '' ']'
+ hash -r
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
399 |
+
++ scontrol show hostnames ip-26-0-165-59
+ export NODELIST=ip-26-0-165-59
+ NODELIST=ip-26-0-165-59
++ scontrol show hostnames ip-26-0-165-59
++ head -n1
+ export MASTER_NODE=ip-26-0-165-59
+ MASTER_NODE=ip-26-0-165-59
+ export MASTER_PORT=12356
+ MASTER_PORT=12356
+ export NNODES=1
+ NNODES=1
+ export GPUS_PER_NODE=8
+ GPUS_PER_NODE=8
+ export WORLD_SIZE=8
+ WORLD_SIZE=8
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ CUDA_DEVICE_MAX_CONNECTIONS=1
+ export NCCL_DEBUG=WARN
+ NCCL_DEBUG=WARN
+ export NANOTRON_BENCHMARK=1
+ NANOTRON_BENCHMARK=1
+ export WANDB_MODE=disabled
+ WANDB_MODE=disabled
+ export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+ TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+ echo '=== GPU Topology ==='
=== GPU Topology ===
+ nvidia-smi topo -m
GPU0 GPU1 GPU2 GPU3 GPU4 GPU5 GPU6 GPU7 CPU Affinity NUMA Affinity GPU NUMA ID
GPU0 X NV18 NV18 NV18 NV18 NV18 NV18 NV18 0-47 0 N/A
GPU1 NV18 X NV18 NV18 NV18 NV18 NV18 NV18 0-47 0 N/A
GPU2 NV18 NV18 X NV18 NV18 NV18 NV18 NV18 0-47 0 N/A
GPU3 NV18 NV18 NV18 X NV18 NV18 NV18 NV18 0-47 0 N/A
GPU4 NV18 NV18 NV18 NV18 X NV18 NV18 NV18 48-95 1 N/A
GPU5 NV18 NV18 NV18 NV18 NV18 X NV18 NV18 48-95 1 N/A
GPU6 NV18 NV18 NV18 NV18 NV18 NV18 X NV18 48-95 1 N/A
GPU7 NV18 NV18 NV18 NV18 NV18 NV18 NV18 X 48-95 1 N/A

Legend:

X = Self
SYS = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)
NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node
PHB = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)
PXB = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)
PIX = Connection traversing at most a single PCIe bridge
NV# = Connection traversing a bonded set of # NVLinks
+ echo ==================
==================
+ echo 'Master node: ip-26-0-165-59'
Master node: ip-26-0-165-59
+ echo 'All nodes: ip-26-0-165-59'
All nodes: ip-26-0-165-59
+ echo 'World size: 8'
World size: 8
+ srun --wait=0 --kill-on-bad-exit=1 torchrun --nnodes=1 --nproc_per_node=8 --rdzv_id=13443093 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-165-59:12356 --max_restarts 0 --rdzv_conf timeout=60 /fsx/nouamane/projects/nanotron/run_train.py --config-file benchmark/configs/config_80G_dp1_tp4_pp2_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml
[2024-12-14 09:21:38,967] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-14 09:21:38,967] torch.distributed.run: [WARNING]
[2024-12-14 09:21:38,967] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 09:21:38,967] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-14 09:21:38,967] torch.distributed.run: [WARNING] *****************************************
NCCL version 2.18.5+cuda12.2
12/14/2024 09:21:58 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Measuring inter-GPU and intra-node bandwidth...
|
462 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Bandwidth measurement complete. Time taken: 14.52 seconds
|
463 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Config:
|
464 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Config(general=GeneralArgs(project='debug',
|
465 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: run='80G_dp1_tp4_pp2_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k',
|
466 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: seed=42,
|
467 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: step=None,
|
468 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: consumed_train_samples=None,
|
469 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: benchmark_csv_path=PosixPath('benchmark/results/bench_final2.csv'),
|
470 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: ignore_sanity_checks=True),
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: parallelism=ParallelismArgs(dp=1,
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: pp=2,
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: tp=4,
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f2e44fa0be0>,
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: tp_linear_async_communication=True,
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: recompute_layer=False,
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: tp_recompute_allgather=True,
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: expert_parallel_size=1),
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
|
481 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: eos_token_id=0,
|
482 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: hidden_act='silu',
|
483 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: hidden_size=8192,
|
484 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: initializer_range=0.02,
|
485 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: intermediate_size=28672,
|
486 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: is_llama_config=True,
|
487 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: max_position_embeddings=4096,
|
488 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: num_attention_heads=64,
|
489 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: num_hidden_layers=80,
|
490 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: num_key_value_heads=64,
|
491 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: pad_token_id=None,
|
492 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: pretraining_tp=1,
|
493 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: rms_norm_eps=1e-05,
|
494 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: rope_scaling=None,
|
495 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: rope_theta=10000.0,
|
496 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: rope_interleaved=False,
|
497 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: tie_word_embeddings=False,
|
498 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: use_cache=True,
|
499 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: vocab_size=131072),
|
500 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: init_method=RandomInit(std=0.02),
|
501 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: dtype=torch.bfloat16,
|
502 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: make_vocab_size_divisible_by=1,
|
503 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: ddp_bucket_cap_mb=25),
|
504 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
|
505 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: tokenizer_revision=None,
|
506 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: tokenizer_max_length=None),
|
507 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
|
508 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: checkpoint_interval=10000,
|
509 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: save_initial_state=False,
|
510 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: save_final_state=False,
|
511 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: resume_checkpoint_path=None,
|
512 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: checkpoints_path_is_shared_file_system=False),
|
513 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: logging=LoggingArgs(log_level='info',
|
514 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: log_level_replica='info',
|
515 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: iteration_step_info_interval=1),
|
516 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: tokens=TokensArgs(sequence_length=4096,
|
517 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: train_steps=100,
|
518 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: micro_batch_size=64,
|
519 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: batch_accumulation_per_replica=4,
|
520 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: val_check_interval=100,
|
521 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: limit_val_batches=0,
|
522 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: limit_test_batches=0),
|
523 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
524 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: adam_beta1=0.9,
|
525 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: adam_beta2=0.95,
|
526 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: torch_adam_is_fused=True,
|
527 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: name='adamW'),
|
528 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: zero_stage=0,
|
529 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: weight_decay=0.01,
|
530 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: clip_grad=1.0,
|
531 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: accumulate_grad_in_fp32=True,
|
532 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
|
533 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: lr_warmup_steps=2,
|
534 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: lr_warmup_style='linear',
|
535 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: lr_decay_style='cosine',
|
536 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: lr_decay_steps=13,
|
537 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: lr_decay_starting_step=None,
|
538 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: min_decay_lr=1e-05)),
|
539 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
|
540 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: start_training_step=1,
|
541 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: data=DataArgs(dataset=None,
|
542 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: seed=42,
|
543 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: num_loading_workers=1))],
|
544 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: profiler=None,
|
545 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: lighteval=None,
|
546 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: s3_upload=None)
|
547 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Model Config:
|
548 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: LlamaConfig(bos_token_id=0,
|
549 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: eos_token_id=0,
|
550 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: hidden_act='silu',
|
551 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: hidden_size=8192,
|
552 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: initializer_range=0.02,
|
553 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: intermediate_size=28672,
|
554 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: is_llama_config=True,
|
555 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: max_position_embeddings=4096,
|
556 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: num_attention_heads=64,
|
557 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: num_hidden_layers=80,
|
558 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: num_key_value_heads=64,
|
559 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: pad_token_id=None,
|
560 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: pretraining_tp=1,
|
561 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: rms_norm_eps=1e-05,
|
562 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: rope_scaling=None,
|
563 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: rope_theta=10000.0,
|
564 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: rope_interleaved=False,
|
565 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: tie_word_embeddings=False,
|
566 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: use_cache=True,
|
567 |
+
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: vocab_size=131072)
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Building model..
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Initialize RoPE Theta = 10000.0
12/14/2024 09:22:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Setting PP block ranks...
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Total number of parameters: 80G (152586.06MiB)
12/14/2024 09:22:19 [INFO|DP=0|PP=1|TP=0|ip-26-0-165-59]: Local number of parameters: 9.76G (18609.23MiB)
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Local number of parameters: 10.2G (19537.28MiB)
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=3|ip-26-0-165-59]: Local number of parameters: 10.2G (19537.28MiB)
12/14/2024 09:22:19 [INFO|DP=0|PP=1|TP=2|ip-26-0-165-59]: Local number of parameters: 9.76G (18609.23MiB)
12/14/2024 09:22:19 [INFO|DP=0|PP=1|TP=1|ip-26-0-165-59]: Local number of parameters: 9.76G (18609.23MiB)
12/14/2024 09:22:19 [INFO|DP=0|PP=1|TP=3|ip-26-0-165-59]: Local number of parameters: 9.76G (18609.23MiB)
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=1|ip-26-0-165-59]: Local number of parameters: 10.2G (19537.28MiB)
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=2|ip-26-0-165-59]: Local number of parameters: 10.2G (19537.28MiB)
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: [After model building] Memory usage: 19537.33MiB. Peak allocated: 19537.33MiB Peak reserved: 19666.00MiB
12/14/2024 09:22:19 [INFO|DP=0|PP=1|TP=1|ip-26-0-165-59]: [After model building] Memory usage: 18609.28MiB. Peak allocated: 18609.29MiB Peak reserved: 18754.00MiB
12/14/2024 09:22:19 [INFO|DP=0|PP=1|TP=0|ip-26-0-165-59]: [After model building] Memory usage: 18609.28MiB. Peak allocated: 18609.29MiB Peak reserved: 18754.00MiB
12/14/2024 09:22:19 [INFO|DP=0|PP=1|TP=2|ip-26-0-165-59]: [After model building] Memory usage: 18609.28MiB. Peak allocated: 18609.29MiB Peak reserved: 18754.00MiB
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=3|ip-26-0-165-59]: [After model building] Memory usage: 19537.33MiB. Peak allocated: 19537.33MiB Peak reserved: 19666.00MiB
12/14/2024 09:22:19 [INFO|DP=0|PP=1|TP=3|ip-26-0-165-59]: [After model building] Memory usage: 18609.28MiB. Peak allocated: 18609.29MiB Peak reserved: 18754.00MiB
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=1|ip-26-0-165-59]: [After model building] Memory usage: 19537.33MiB. Peak allocated: 19537.33MiB Peak reserved: 19666.00MiB
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=2|ip-26-0-165-59]: [After model building] Memory usage: 19537.33MiB. Peak allocated: 19537.33MiB Peak reserved: 19666.00MiB
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: No checkpoint path provided.
12/14/2024 09:22:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: Parametrizing model parameters using StandardParametrizator
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
12/14/2024 09:22:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-165-59]: [Optimizer Building] Using LearningRateForSP as learning rate
Traceback (most recent call last):
|
599 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
600 |
+
Traceback (most recent call last):
|
601 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
602 |
+
trainer = DistributedTrainer(config_file)
|
603 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
604 |
+
trainer = DistributedTrainer(config_file)
|
605 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
606 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
607 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
608 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
609 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
610 |
+
optimizer = optimizer_builder(named_param_groups)
|
611 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
612 |
+
optimizer = optimizer_builder(named_param_groups)
|
613 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
614 |
+
result = OptimizerFromGradientAccumulator(
|
615 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
616 |
+
Traceback (most recent call last):
|
617 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
618 |
+
result = OptimizerFromGradientAccumulator(
|
619 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
620 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
621 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
622 |
+
trainer = DistributedTrainer(config_file)
|
623 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
624 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
625 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
626 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
627 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 95, in __init__
|
628 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
629 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
630 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
631 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 95, in __init__
|
632 |
+
big_flat_buffer = torch.empty(length, dtype=torch.float, device="cuda")
|
633 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 38.16 GiB. GPU 1 has a total capacty of 79.33 GiB of which 11.78 GiB is free. Including non-PyTorch memory, this process has 67.54 GiB memory in use. Of the allocated memory 57.24 GiB is allocated by PyTorch, and 130.11 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
634 |
+
optimizer = optimizer_builder(named_param_groups)
|
635 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
636 |
+
big_flat_buffer = torch.empty(length, dtype=torch.float, device="cuda")
|
637 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 36.35 GiB. GPU 6 has a total capacty of 79.33 GiB of which 14.49 GiB is free. Including non-PyTorch memory, this process has 64.83 GiB memory in use. Of the allocated memory 54.52 GiB is allocated by PyTorch, and 146.25 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
638 |
+
result = OptimizerFromGradientAccumulator(
|
639 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
640 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
641 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
642 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
643 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 95, in __init__
|
644 |
+
big_flat_buffer = torch.empty(length, dtype=torch.float, device="cuda")
|
645 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 38.16 GiB. GPU 2 has a total capacty of 79.33 GiB of which 11.78 GiB is free. Including non-PyTorch memory, this process has 67.54 GiB memory in use. Of the allocated memory 57.24 GiB is allocated by PyTorch, and 130.11 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
646 |
+
Traceback (most recent call last):
|
647 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
648 |
+
trainer = DistributedTrainer(config_file)
|
649 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
650 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
651 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
652 |
+
optimizer = optimizer_builder(named_param_groups)
|
653 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
654 |
+
result = OptimizerFromGradientAccumulator(
|
655 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
656 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
657 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
658 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
659 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 95, in __init__
|
660 |
+
big_flat_buffer = torch.empty(length, dtype=torch.float, device="cuda")
|
661 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 38.16 GiB. GPU 0 has a total capacty of 79.33 GiB of which 11.97 GiB is free. Including non-PyTorch memory, this process has 67.35 GiB memory in use. Of the allocated memory 57.24 GiB is allocated by PyTorch, and 130.11 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
662 |
+
Traceback (most recent call last):
|
663 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
664 |
+
trainer = DistributedTrainer(config_file)
|
665 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
666 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
667 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
668 |
+
optimizer = optimizer_builder(named_param_groups)
|
669 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
670 |
+
result = OptimizerFromGradientAccumulator(
|
671 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
672 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
673 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
674 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
675 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 95, in __init__
|
676 |
+
big_flat_buffer = torch.empty(length, dtype=torch.float, device="cuda")
|
677 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 36.35 GiB. GPU 5 has a total capacty of 79.33 GiB of which 14.49 GiB is free. Including non-PyTorch memory, this process has 64.83 GiB memory in use. Of the allocated memory 54.52 GiB is allocated by PyTorch, and 146.25 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
678 |
+
Traceback (most recent call last):
|
679 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
680 |
+
trainer = DistributedTrainer(config_file)
|
681 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
682 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
683 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
684 |
+
optimizer = optimizer_builder(named_param_groups)
|
685 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
686 |
+
result = OptimizerFromGradientAccumulator(
|
687 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
688 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
689 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
690 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
691 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 95, in __init__
|
692 |
+
big_flat_buffer = torch.empty(length, dtype=torch.float, device="cuda")
|
693 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 38.16 GiB. GPU 3 has a total capacty of 79.33 GiB of which 12.02 GiB is free. Including non-PyTorch memory, this process has 67.30 GiB memory in use. Of the allocated memory 57.24 GiB is allocated by PyTorch, and 130.11 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
694 |
+
Traceback (most recent call last):
|
695 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
696 |
+
trainer = DistributedTrainer(config_file)
|
697 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
698 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
699 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
700 |
+
optimizer = optimizer_builder(named_param_groups)
|
701 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
702 |
+
result = OptimizerFromGradientAccumulator(
|
703 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
704 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
705 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
706 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
707 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 95, in __init__
|
708 |
+
Traceback (most recent call last):
|
709 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
710 |
+
big_flat_buffer = torch.empty(length, dtype=torch.float, device="cuda")
|
711 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 36.35 GiB. GPU 4 has a total capacty of 79.33 GiB of which 14.53 GiB is free. Including non-PyTorch memory, this process has 64.79 GiB memory in use. Of the allocated memory 54.52 GiB is allocated by PyTorch, and 146.25 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
712 |
+
trainer = DistributedTrainer(config_file)
|
713 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
714 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
715 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
716 |
+
optimizer = optimizer_builder(named_param_groups)
|
717 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
718 |
+
result = OptimizerFromGradientAccumulator(
|
719 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
720 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
721 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
722 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
723 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 95, in __init__
|
724 |
+
big_flat_buffer = torch.empty(length, dtype=torch.float, device="cuda")
|
725 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 36.35 GiB. GPU 7 has a total capacty of 79.33 GiB of which 15.42 GiB is free. Including non-PyTorch memory, this process has 63.89 GiB memory in use. Of the allocated memory 54.52 GiB is allocated by PyTorch, and 146.25 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
726 |
+
[2024-12-14 09:22:34,260] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 0 (pid: 197717) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
|
727 |
+
Traceback (most recent call last):
|
728 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
729 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
730 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
731 |
+
return f(*args, **kwargs)
|
732 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
733 |
+
run(args)
|
734 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
735 |
+
elastic_launch(
|
736 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
737 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
738 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
|
739 |
+
raise ChildFailedError(
|
740 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
741 |
+
============================================================
|
742 |
+
/fsx/nouamane/projects/nanotron/run_train.py FAILED
|
743 |
+
------------------------------------------------------------
|
744 |
+
Failures:
|
745 |
+
[1]:
|
746 |
+
time : 2024-12-14_09:22:34
|
747 |
+
host : ip-26-0-165-59.ec2.internal
|
748 |
+
rank : 1 (local_rank: 1)
|
749 |
+
exitcode : 1 (pid: 197718)
|
750 |
+
error_file: <N/A>
|
751 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
752 |
+
[2]:
|
753 |
+
time : 2024-12-14_09:22:34
|
754 |
+
host : ip-26-0-165-59.ec2.internal
|
755 |
+
rank : 2 (local_rank: 2)
|
756 |
+
exitcode : 1 (pid: 197719)
|
757 |
+
error_file: <N/A>
|
758 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
759 |
+
[3]:
|
760 |
+
time : 2024-12-14_09:22:34
|
761 |
+
host : ip-26-0-165-59.ec2.internal
|
762 |
+
rank : 3 (local_rank: 3)
|
763 |
+
exitcode : 1 (pid: 197720)
|
764 |
+
error_file: <N/A>
|
765 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
766 |
+
[4]:
|
767 |
+
time : 2024-12-14_09:22:34
|
768 |
+
host : ip-26-0-165-59.ec2.internal
|
769 |
+
rank : 4 (local_rank: 4)
|
770 |
+
exitcode : 1 (pid: 197721)
|
771 |
+
error_file: <N/A>
|
772 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
773 |
+
[5]:
|
774 |
+
time : 2024-12-14_09:22:34
|
775 |
+
host : ip-26-0-165-59.ec2.internal
|
776 |
+
rank : 5 (local_rank: 5)
|
777 |
+
exitcode : 1 (pid: 197722)
|
778 |
+
error_file: <N/A>
|
779 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
780 |
+
[6]:
|
781 |
+
time : 2024-12-14_09:22:34
|
782 |
+
host : ip-26-0-165-59.ec2.internal
|
783 |
+
rank : 6 (local_rank: 6)
|
784 |
+
exitcode : 1 (pid: 197723)
|
785 |
+
error_file: <N/A>
|
786 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
787 |
+
[7]:
|
788 |
+
time : 2024-12-14_09:22:34
|
789 |
+
host : ip-26-0-165-59.ec2.internal
|
790 |
+
rank : 7 (local_rank: 7)
|
791 |
+
exitcode : 1 (pid: 197724)
|
792 |
+
error_file: <N/A>
|
793 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
794 |
+
------------------------------------------------------------
|
795 |
+
Root Cause (first observed failure):
|
796 |
+
[0]:
|
797 |
+
time : 2024-12-14_09:22:34
|
798 |
+
host : ip-26-0-165-59.ec2.internal
|
799 |
+
rank : 0 (local_rank: 0)
|
800 |
+
exitcode : 1 (pid: 197717)
|
801 |
+
error_file: <N/A>
|
802 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
803 |
+
============================================================
|
804 |
+
srun: error: ip-26-0-165-59: task 0: Exited with exit code 1
|
805 |
+
srun: launch/slurm: _step_signal: Terminating StepId=13443093.0
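For scale, every failed allocation above comes from the `big_flat_buffer = torch.empty(length, dtype=torch.float, device="cuda")` call in FP32GradientAccumulator, which appears to reserve one float32 slot per parameter owned by the rank, so the request grows linearly with the local parameter count. The snippet below is only a back-of-the-envelope sketch: the per-rank parameter count is an assumed, illustrative figure chosen to reproduce the 36.35 GiB number in the log, not a value read from the benchmark config, and the allocator hint in the message (PYTORCH_CUDA_ALLOC_CONF with max_split_size_mb) only mitigates fragmentation rather than shrinking a buffer that simply does not fit.

# Illustrative only: estimate the flat fp32 gradient buffer that the
# accumulator would try to allocate for a given local parameter count
# (4 bytes per float32 element).
def fp32_grad_buffer_gib(local_params: int) -> float:
    return local_params * 4 / 2**30

# Roughly 9.76 billion parameters per rank (assumed figure) reproduces the
# 36.35 GiB request reported above.
print(f"{fp32_grad_buffer_gib(9_757_500_000):.2f} GiB")  # -> 36.35 GiB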
logs/13443135-bench_469G_dp1_tp8_pp4_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render. See raw diff.
logs/13443142-bench_80G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.out
ADDED
@@ -0,0 +1,1042 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
++ scontrol show hostnames 'ip-26-0-160-[100,242],ip-26-0-161-78,ip-26-0-162-180'
+ export 'NODELIST=ip-26-0-160-100
ip-26-0-160-242
ip-26-0-161-78
ip-26-0-162-180'
+ NODELIST='ip-26-0-160-100
ip-26-0-160-242
ip-26-0-161-78
ip-26-0-162-180'
++ head -n1
++ scontrol show hostnames 'ip-26-0-160-[100,242],ip-26-0-161-78,ip-26-0-162-180'
+ export MASTER_NODE=ip-26-0-160-100
+ MASTER_NODE=ip-26-0-160-100
+ export MASTER_PORT=12356
+ MASTER_PORT=12356
+ export NNODES=4
+ NNODES=4
+ export GPUS_PER_NODE=8
+ GPUS_PER_NODE=8
+ export WORLD_SIZE=32
+ WORLD_SIZE=32
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ CUDA_DEVICE_MAX_CONNECTIONS=1
+ export NCCL_DEBUG=WARN
+ NCCL_DEBUG=WARN
+ export NANOTRON_BENCHMARK=1
+ NANOTRON_BENCHMARK=1
+ export WANDB_MODE=disabled
+ WANDB_MODE=disabled
+ export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+ TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+ echo '=== GPU Topology ==='
=== GPU Topology ===
+ nvidia-smi topo -m
        GPU0    GPU1    GPU2    GPU3    GPU4    GPU5    GPU6    GPU7    CPU Affinity    NUMA Affinity   GPU NUMA ID
GPU0     X      NV18    NV18    NV18    NV18    NV18    NV18    NV18    0-47            0               N/A
GPU1    NV18     X      NV18    NV18    NV18    NV18    NV18    NV18    0-47            0               N/A
GPU2    NV18    NV18     X      NV18    NV18    NV18    NV18    NV18    0-47            0               N/A
GPU3    NV18    NV18    NV18     X      NV18    NV18    NV18    NV18    0-47            0               N/A
GPU4    NV18    NV18    NV18    NV18     X      NV18    NV18    NV18    48-95           1               N/A
GPU5    NV18    NV18    NV18    NV18    NV18     X      NV18    NV18    48-95           1               N/A
GPU6    NV18    NV18    NV18    NV18    NV18    NV18     X      NV18    48-95           1               N/A
GPU7    NV18    NV18    NV18    NV18    NV18    NV18    NV18     X      48-95           1               N/A

Legend:

  X    = Self
  SYS  = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)
  NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node
  PHB  = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)
  PXB  = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)
  PIX  = Connection traversing at most a single PCIe bridge
  NV#  = Connection traversing a bonded set of # NVLinks
+ echo ==================
==================
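The matrix above shows NV18 for every off-diagonal pair, i.e. each of the 8 GPUs reaches every other GPU over a bonded set of 18 NVLinks (per the legend), and only the CPU/NUMA affinity differs between the two halves of the node. A minimal, self-contained check of that reading; the `topo` string below is a trimmed stand-in for the real nvidia-smi output, not parsed from it:

# Hedged illustration: confirm that every GPU pair reports an NVLink connection.
topo = """
GPU0 X NV18 NV18 NV18 NV18 NV18 NV18 NV18
GPU1 NV18 X NV18 NV18 NV18 NV18 NV18 NV18
GPU2 NV18 NV18 X NV18 NV18 NV18 NV18 NV18
GPU3 NV18 NV18 NV18 X NV18 NV18 NV18 NV18
GPU4 NV18 NV18 NV18 NV18 X NV18 NV18 NV18
GPU5 NV18 NV18 NV18 NV18 NV18 X NV18 NV18
GPU6 NV18 NV18 NV18 NV18 NV18 NV18 X NV18
GPU7 NV18 NV18 NV18 NV18 NV18 NV18 NV18 X
"""
rows = [line.split()[1:] for line in topo.strip().splitlines()]
assert all(cell in ("X", "NV18") for row in rows for cell in row), "expected all-to-all NVLink"
print("all 8 GPUs are fully connected over NVLink")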
+ echo 'Master node: ip-26-0-160-100'
Master node: ip-26-0-160-100
+ echo 'All nodes: ip-26-0-160-100
ip-26-0-160-242
ip-26-0-161-78
ip-26-0-162-180'
All nodes: ip-26-0-160-100
ip-26-0-160-242
ip-26-0-161-78
ip-26-0-162-180
+ echo 'World size: 32'
World size: 32
+ srun --wait=0 --kill-on-bad-exit=1 torchrun --nnodes=4 --nproc_per_node=8 --rdzv_id=13443142 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-160-100:12356 --max_restarts 0 --rdzv_conf timeout=60 /fsx/nouamane/projects/nanotron/run_train.py --config-file benchmark/configs/config_80G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml
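A quick consistency check on this launch line (a sketch, not part of the job script): torchrun starts nnodes × nproc_per_node worker processes, and that product has to equal the dp × tp × pp grid encoded in the config name, here dp=1, tp=16, pp=2.

# Hedged sketch: verify that the torchrun topology matches the parallelism grid
# named in the config file. Values are copied from the launch line above;
# nothing here is read from the actual YAML.
nnodes, nproc_per_node = 4, 8
dp, tp, pp = 1, 16, 2

world_size = nnodes * nproc_per_node          # 32 workers launched by torchrun
assert world_size == dp * tp * pp, "parallelism grid must cover every rank exactly once"
print(world_size)  # 32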
[2024-12-14 10:25:58,072] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-14 10:25:58,072] torch.distributed.run: [WARNING]
[2024-12-14 10:25:58,072] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 10:25:58,072] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-14 10:25:58,072] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 10:25:58,072] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-14 10:25:58,073] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-14 10:25:58,108] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-14 10:25:58,073] torch.distributed.run: [WARNING]
[2024-12-14 10:25:58,073] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 10:25:58,073] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-14 10:25:58,073] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 10:25:58,073] torch.distributed.run: [WARNING]
[2024-12-14 10:25:58,073] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 10:25:58,073] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-14 10:25:58,073] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 10:25:58,108] torch.distributed.run: [WARNING]
[2024-12-14 10:25:58,108] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 10:25:58,108] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-14 10:25:58,108] torch.distributed.run: [WARNING] *****************************************
|
487 |
+
NCCL version 2.18.5+cuda12.2
|
488 |
+
12/14/2024 10:26:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Measuring inter-GPU and intra-node bandwidth...
|
489 |
+
NCCL version 2.18.5+cuda12.2
|
490 |
+
NCCL version 2.18.5+cuda12.2
|
491 |
+
NCCL version 2.18.5+cuda12.2
|
492 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Bandwidth measurement complete. Time taken: 17.68 seconds
|
493 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Config:
|
494 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Config(general=GeneralArgs(project='debug',
|
495 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: run='80G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k',
|
496 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: seed=42,
|
497 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: step=None,
|
498 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: consumed_train_samples=None,
|
499 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: benchmark_csv_path=PosixPath('benchmark/results/bench_final2.csv'),
|
500 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: ignore_sanity_checks=True),
|
501 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: parallelism=ParallelismArgs(dp=1,
|
502 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: pp=2,
|
503 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: tp=16,
|
504 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7fb0b5e90e20>,
|
505 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
|
506 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: tp_linear_async_communication=True,
|
507 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: recompute_layer=False,
|
508 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: tp_recompute_allgather=True,
|
509 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: expert_parallel_size=1),
|
510 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
|
511 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: eos_token_id=0,
|
512 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: hidden_act='silu',
|
513 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: hidden_size=8192,
|
514 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: initializer_range=0.02,
|
515 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: intermediate_size=28672,
|
516 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: is_llama_config=True,
|
517 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: max_position_embeddings=4096,
|
518 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: num_attention_heads=64,
|
519 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: num_hidden_layers=80,
|
520 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: num_key_value_heads=64,
|
521 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: pad_token_id=None,
|
522 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: pretraining_tp=1,
|
523 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: rms_norm_eps=1e-05,
|
524 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: rope_scaling=None,
|
525 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: rope_theta=10000.0,
|
526 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: rope_interleaved=False,
|
527 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: tie_word_embeddings=False,
|
528 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: use_cache=True,
|
529 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: vocab_size=131072),
|
530 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: init_method=RandomInit(std=0.02),
|
531 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: dtype=torch.bfloat16,
|
532 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: make_vocab_size_divisible_by=1,
|
533 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: ddp_bucket_cap_mb=25),
|
534 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
|
535 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: tokenizer_revision=None,
|
536 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: tokenizer_max_length=None),
|
537 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
|
538 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: checkpoint_interval=10000,
|
539 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: save_initial_state=False,
|
540 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: save_final_state=False,
|
541 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: resume_checkpoint_path=None,
|
542 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: checkpoints_path_is_shared_file_system=False),
|
543 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: logging=LoggingArgs(log_level='info',
|
544 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: log_level_replica='info',
|
545 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: iteration_step_info_interval=1),
|
546 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: tokens=TokensArgs(sequence_length=4096,
|
547 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: train_steps=100,
|
548 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: micro_batch_size=32,
|
549 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: batch_accumulation_per_replica=8,
|
550 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: val_check_interval=100,
|
551 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: limit_val_batches=0,
|
552 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: limit_test_batches=0),
|
553 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
554 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: adam_beta1=0.9,
|
555 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: adam_beta2=0.95,
|
556 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: torch_adam_is_fused=True,
|
557 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: name='adamW'),
|
558 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: zero_stage=0,
|
559 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: weight_decay=0.01,
|
560 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: clip_grad=1.0,
|
561 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: accumulate_grad_in_fp32=True,
|
562 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
|
563 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: lr_warmup_steps=2,
|
564 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: lr_warmup_style='linear',
|
565 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: lr_decay_style='cosine',
|
566 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: lr_decay_steps=13,
|
567 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: lr_decay_starting_step=None,
|
568 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: min_decay_lr=1e-05)),
|
569 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
|
570 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: start_training_step=1,
|
571 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: data=DataArgs(dataset=None,
|
572 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: seed=42,
|
573 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: num_loading_workers=1))],
|
574 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: profiler=None,
|
575 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: lighteval=None,
|
576 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: s3_upload=None)
|
577 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Model Config:
|
578 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: LlamaConfig(bos_token_id=0,
|
579 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: eos_token_id=0,
|
580 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: hidden_act='silu',
|
581 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: hidden_size=8192,
|
582 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: initializer_range=0.02,
|
583 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: intermediate_size=28672,
|
584 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: is_llama_config=True,
|
585 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: max_position_embeddings=4096,
|
586 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: num_attention_heads=64,
|
587 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: num_hidden_layers=80,
|
588 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: num_key_value_heads=64,
|
589 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: pad_token_id=None,
|
590 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: pretraining_tp=1,
|
591 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: rms_norm_eps=1e-05,
|
592 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: rope_scaling=None,
|
593 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: rope_theta=10000.0,
|
594 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: rope_interleaved=False,
|
595 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: tie_word_embeddings=False,
|
596 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: use_cache=True,
|
597 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: vocab_size=131072)
|
598 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Building model..
|
599 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Initialize RoPE Theta = 10000.0
|
600 |
+
12/14/2024 10:26:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Setting PP block ranks...
|
601 |
+
NCCL version 2.18.5+cuda12.2
|
602 |
+
NCCL version 2.18.5+cuda12.2
|
603 |
+
NCCL version 2.18.5+cuda12.2
|
604 |
+
NCCL version 2.18.5+cuda12.2
|
605 |
+
NCCL version 2.18.5+cuda12.2
|
606 |
+
NCCL version 2.18.5+cuda12.2
|
607 |
+
NCCL version 2.18.5+cuda12.2
|
608 |
+
NCCL version 2.18.5+cuda12.2
|
609 |
+
NCCL version 2.18.5+cuda12.2
|
610 |
+
NCCL version 2.18.5+cuda12.2
|
611 |
+
NCCL version 2.18.5+cuda12.2
|
612 |
+
NCCL version 2.18.5+cuda12.2
|
613 |
+
NCCL version 2.18.5+cuda12.2
|
614 |
+
NCCL version 2.18.5+cuda12.2
|
615 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=0|ip-26-0-161-78]: Local number of parameters: 2.44G (4653.23MiB)
|
616 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Total number of parameters: 80G (152616.25MiB)
|
617 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Local number of parameters: 2.56G (4885.28MiB)
|
618 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=0|ip-26-0-161-78]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
619 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
620 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: No checkpoint path provided.
|
621 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Parametrizing model parameters using StandardParametrizator
|
622 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=14|ip-26-0-160-242]: Local number of parameters: 2.56G (4885.28MiB)
|
623 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=12|ip-26-0-160-242]: Local number of parameters: 2.56G (4885.28MiB)
|
624 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=14|ip-26-0-162-180]: Local number of parameters: 2.44G (4653.23MiB)
|
625 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=12|ip-26-0-162-180]: Local number of parameters: 2.44G (4653.23MiB)
|
626 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=8|ip-26-0-160-242]: Local number of parameters: 2.56G (4885.28MiB)
|
627 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=14|ip-26-0-160-242]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 21826.00MiB
|
628 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=13|ip-26-0-160-242]: Local number of parameters: 2.56G (4885.28MiB)
|
629 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=8|ip-26-0-162-180]: Local number of parameters: 2.44G (4653.23MiB)
|
630 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-100]: Local number of parameters: 2.56G (4885.28MiB)
|
631 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=4|ip-26-0-160-100]: Local number of parameters: 2.56G (4885.28MiB)
|
632 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=12|ip-26-0-160-242]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 19778.00MiB
|
633 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=15|ip-26-0-160-242]: Local number of parameters: 2.56G (4885.28MiB)
|
634 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=13|ip-26-0-162-180]: Local number of parameters: 2.44G (4653.23MiB)
|
635 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=15|ip-26-0-162-180]: Local number of parameters: 2.44G (4653.23MiB)
|
636 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=12|ip-26-0-162-180]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
637 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=14|ip-26-0-162-180]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
638 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=3|ip-26-0-161-78]: Local number of parameters: 2.44G (4653.23MiB)
|
639 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=4|ip-26-0-161-78]: Local number of parameters: 2.44G (4653.23MiB)
|
640 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=8|ip-26-0-160-242]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 19778.00MiB
|
641 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=8|ip-26-0-162-180]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
642 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=13|ip-26-0-160-242]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 20802.00MiB
|
643 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-100]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
644 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=4|ip-26-0-160-100]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
645 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=10|ip-26-0-160-242]: Local number of parameters: 2.56G (4885.28MiB)
|
646 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=10|ip-26-0-162-180]: Local number of parameters: 2.44G (4653.23MiB)
|
647 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=13|ip-26-0-162-180]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
648 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-100]: Local number of parameters: 2.56G (4885.28MiB)
|
649 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=15|ip-26-0-160-242]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 21826.00MiB
|
650 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=15|ip-26-0-162-180]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
651 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=1|ip-26-0-161-78]: Local number of parameters: 2.44G (4653.23MiB)
|
652 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=3|ip-26-0-161-78]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
653 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=4|ip-26-0-161-78]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
654 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=9|ip-26-0-160-242]: Local number of parameters: 2.56G (4885.28MiB)
|
655 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=9|ip-26-0-162-180]: Local number of parameters: 2.44G (4653.23MiB)
|
656 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=10|ip-26-0-160-242]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 20802.00MiB
|
657 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=10|ip-26-0-162-180]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
658 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-100]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
659 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=1|ip-26-0-161-78]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
660 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=5|ip-26-0-160-100]: Local number of parameters: 2.56G (4885.28MiB)
|
661 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=11|ip-26-0-162-180]: Local number of parameters: 2.44G (4653.23MiB)
|
662 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=5|ip-26-0-161-78]: Local number of parameters: 2.44G (4653.23MiB)
|
663 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=9|ip-26-0-162-180]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
664 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-100]: Local number of parameters: 2.56G (4885.28MiB)
|
665 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=9|ip-26-0-160-242]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 21826.00MiB
|
666 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=2|ip-26-0-161-78]: Local number of parameters: 2.44G (4653.23MiB)
|
667 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=6|ip-26-0-160-100]: Local number of parameters: 2.56G (4885.28MiB)
|
668 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=6|ip-26-0-161-78]: Local number of parameters: 2.44G (4653.23MiB)
|
669 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=11|ip-26-0-162-180]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
670 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=5|ip-26-0-160-100]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
671 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=11|ip-26-0-160-242]: Local number of parameters: 2.56G (4885.28MiB)
|
672 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=5|ip-26-0-161-78]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
673 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=7|ip-26-0-160-100]: Local number of parameters: 2.56G (4885.28MiB)
|
674 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=7|ip-26-0-161-78]: Local number of parameters: 2.44G (4653.23MiB)
|
675 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-100]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
676 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=2|ip-26-0-161-78]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
677 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=6|ip-26-0-160-100]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
678 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=6|ip-26-0-161-78]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
679 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=7|ip-26-0-160-100]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
680 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=1|TP=7|ip-26-0-161-78]: [After model building] Memory usage: 4653.28MiB. Peak allocated: 5440.00MiB Peak reserved: 22850.00MiB
|
681 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=11|ip-26-0-160-242]: [After model building] Memory usage: 4885.33MiB. Peak allocated: 5440.00MiB Peak reserved: 20802.00MiB
|
682 |
+
NCCL version 2.18.5+cuda12.2
|
683 |
+
NCCL version 2.18.5+cuda12.2
|
684 |
+
NCCL version 2.18.5+cuda12.2
|
685 |
+
NCCL version 2.18.5+cuda12.2
|
686 |
+
NCCL version 2.18.5+cuda12.2
|
687 |
+
NCCL version 2.18.5+cuda12.2
|
688 |
+
NCCL version 2.18.5+cuda12.2
|
689 |
+
NCCL version 2.18.5+cuda12.2
|
690 |
+
NCCL version 2.18.5+cuda12.2
|
691 |
+
NCCL version 2.18.5+cuda12.2
|
692 |
+
NCCL version 2.18.5+cuda12.2
|
693 |
+
NCCL version 2.18.5+cuda12.2
|
694 |
+
NCCL version 2.18.5+cuda12.2
|
695 |
+
NCCL version 2.18.5+cuda12.2
|
696 |
+
12/14/2024 10:26:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: [Optimizer Building] Using LearningRateForSP as learning rate
|
697 |
+
12/14/2024 10:26:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
|
698 |
+
12/14/2024 10:26:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Using dummy data generator
|
699 |
+
12/14/2024 10:26:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: [Training Plan] There are 1 training stages
|
700 |
+
12/14/2024 10:26:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: [Stage Stable Training Stage] start from step 1
|
701 |
+
12/14/2024 10:26:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]:
|
702 |
+
12/14/2024 10:26:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: [Start training] datetime: 2024-12-14 10:26:49.379423 | mbs: 32 | grad_accum: 8 | global_batch_size: 256 | sequence_length: 4096 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
|
703 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
704 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
705 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
706 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
707 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
708 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
709 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
710 |
+
12/14/2024 10:26:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Resuming training from stage Stable Training Stage, it has trained for 0 samples and has 99 remaining train steps
|
711 |
+
12/14/2024 10:26:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-100]: Memory usage: 24426.46MiB. Peak allocated 24426.46MiB. Peak reserved: 42394.00MiB
|
712 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
713 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
714 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
715 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
716 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
717 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
718 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
719 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
720 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
721 |
+
Traceback (most recent call last):
|
722 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 252, in <module>
|
723 |
+
trainer.train(dataloader)
|
724 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 461, in train
|
725 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
726 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 498, in training_step
|
727 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
728 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 258, in train_batch_iter
|
729 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
730 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 50, in forward
|
731 |
+
output = model(**micro_batch)
|
732 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
733 |
+
return self._call_impl(*args, **kwargs)
|
734 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
735 |
+
return forward_call(*args, **kwargs)
|
736 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
737 |
+
sharded_logits = self.model(
|
738 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
739 |
+
return self._call_impl(*args, **kwargs)
|
740 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
741 |
+
return forward_call(*args, **kwargs)
|
742 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
743 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
744 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
745 |
+
Traceback (most recent call last):
|
746 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 252, in <module>
|
747 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
748 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
749 |
+
trainer.train(dataloader)
|
750 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 461, in train
|
751 |
+
return self._call_impl(*args, **kwargs)
|
752 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
753 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
754 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 498, in training_step
|
755 |
+
return forward_call(*args, **kwargs)
|
756 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
757 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
758 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 258, in train_batch_iter
|
759 |
+
output = self.pp_block(**new_kwargs)
|
760 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
761 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
762 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 50, in forward
|
763 |
+
return self._call_impl(*args, **kwargs)
|
764 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
765 |
+
output = model(**micro_batch)
|
766 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
767 |
+
return forward_call(*args, **kwargs)
|
768 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
769 |
+
return self._call_impl(*args, **kwargs)
|
770 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
771 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
772 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
773 |
+
return forward_call(*args, **kwargs)
|
774 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
775 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
776 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
777 |
+
Traceback (most recent call last):
|
778 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 252, in <module>
|
779 |
+
sharded_logits = self.model(
|
780 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
781 |
+
return self._call_impl(*args, **kwargs)
|
782 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
783 |
+
trainer.train(dataloader)
|
784 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 461, in train
|
785 |
+
return self._call_impl(*args, **kwargs)
|
786 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
787 |
+
return forward_call(*args, **kwargs)
|
788 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
789 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
790 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 498, in training_step
|
791 |
+
return forward_call(*args, **kwargs)
|
792 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
793 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
794 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
795 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
796 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 258, in train_batch_iter
|
797 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
798 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
799 |
+
return self._call_impl(*args, **kwargs)
|
800 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
801 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
802 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 50, in forward
|
803 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
804 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
805 |
+
return forward_call(*args, **kwargs)
|
806 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
807 |
+
output = model(**micro_batch)
|
808 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
809 |
+
return self._call_impl(*args, **kwargs)
|
810 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
811 |
+
return row_linear(
|
812 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
813 |
+
return self._call_impl(*args, **kwargs)
|
814 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
815 |
+
return forward_call(*args, **kwargs)
|
816 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
817 |
+
return forward_call(*args, **kwargs)
|
818 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
819 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
820 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
821 |
+
output = self.pp_block(**new_kwargs)
|
822 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
823 |
+
sharded_logits = self.model(
|
824 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
825 |
+
return self._call_impl(*args, **kwargs)
|
826 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
827 |
+
return self._call_impl(*args, **kwargs)
|
828 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
829 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
830 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
831 |
+
return forward_call(*args, **kwargs)
|
832 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
833 |
+
out = F.linear(tensor, weight, bias)
|
834 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 4 has a total capacty of 79.33 GiB of which 987.94 MiB is free. Including non-PyTorch memory, this process has 78.35 GiB memory in use. Of the allocated memory 65.05 GiB is allocated by PyTorch, and 1.68 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
835 |
+
return forward_call(*args, **kwargs)
|
836 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
837 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
838 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
839 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
840 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
841 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
842 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
843 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
844 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
845 |
+
return self._call_impl(*args, **kwargs)
|
846 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
847 |
+
return self._call_impl(*args, **kwargs)
|
848 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
849 |
+
return forward_call(*args, **kwargs)
|
850 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
851 |
+
return forward_call(*args, **kwargs)
|
852 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
853 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
854 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
855 |
+
output = self.pp_block(**new_kwargs)
|
856 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
857 |
+
return self._call_impl(*args, **kwargs)
|
858 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
859 |
+
return self._call_impl(*args, **kwargs)
|
860 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
861 |
+
return forward_call(*args, **kwargs)
|
862 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
863 |
+
return forward_call(*args, **kwargs)
|
864 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
865 |
+
return row_linear(
|
866 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
867 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
868 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
869 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
870 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
871 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
872 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
873 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
874 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
875 |
+
return self._call_impl(*args, **kwargs)
|
876 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
877 |
+
out = F.linear(tensor, weight, bias)
|
878 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 3 has a total capacty of 79.33 GiB of which 911.94 MiB is free. Including non-PyTorch memory, this process has 78.43 GiB memory in use. Of the allocated memory 65.05 GiB is allocated by PyTorch, and 1.68 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
879 |
+
return forward_call(*args, **kwargs)
|
880 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
881 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
882 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
883 |
+
return self._call_impl(*args, **kwargs)
|
884 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
885 |
+
return forward_call(*args, **kwargs)
|
886 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
887 |
+
return row_linear(
|
888 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
889 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
890 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
891 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
892 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
893 |
+
out = F.linear(tensor, weight, bias)
|
894 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 6 has a total capacty of 79.33 GiB of which 1.86 GiB is free. Including non-PyTorch memory, this process has 77.46 GiB memory in use. Of the allocated memory 65.05 GiB is allocated by PyTorch, and 823.46 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
895 |
+
[2024-12-14 10:27:14,464] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 61863 closing signal SIGTERM
|
896 |
+
[2024-12-14 10:27:14,464] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 61864 closing signal SIGTERM
|
897 |
+
[2024-12-14 10:27:14,464] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 61865 closing signal SIGTERM
|
898 |
+
[2024-12-14 10:27:14,465] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 61868 closing signal SIGTERM
|
899 |
+
[2024-12-14 10:27:14,466] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 61870 closing signal SIGTERM
|
900 |
+
[2024-12-14 10:27:33,701] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 3 (pid: 61866) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
|
901 |
+
Traceback (most recent call last):
|
902 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
903 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
904 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
905 |
+
return f(*args, **kwargs)
|
906 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
907 |
+
run(args)
|
908 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
909 |
+
elastic_launch(
|
910 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
911 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
912 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
|
913 |
+
raise ChildFailedError(
|
914 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
915 |
+
============================================================
|
916 |
+
/fsx/nouamane/projects/nanotron/run_train.py FAILED
|
917 |
+
------------------------------------------------------------
|
918 |
+
Failures:
|
919 |
+
[1]:
|
920 |
+
time : 2024-12-14_10:27:14
|
921 |
+
host : ip-26-0-160-242.ec2.internal
|
922 |
+
rank : 12 (local_rank: 4)
|
923 |
+
exitcode : 1 (pid: 61867)
|
924 |
+
error_file: <N/A>
|
925 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
926 |
+
[2]:
|
927 |
+
time : 2024-12-14_10:27:14
|
928 |
+
host : ip-26-0-160-242.ec2.internal
|
929 |
+
rank : 14 (local_rank: 6)
|
930 |
+
exitcode : 1 (pid: 61869)
|
931 |
+
error_file: <N/A>
|
932 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
933 |
+
------------------------------------------------------------
|
934 |
+
Root Cause (first observed failure):
|
935 |
+
[0]:
|
936 |
+
time : 2024-12-14_10:27:14
|
937 |
+
host : ip-26-0-160-242.ec2.internal
|
938 |
+
rank : 11 (local_rank: 3)
|
939 |
+
exitcode : 1 (pid: 61866)
|
940 |
+
error_file: <N/A>
|
941 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
942 |
+
============================================================
|
943 |
+
srun: error: ip-26-0-160-242: task 1: Exited with exit code 1
|
944 |
+
srun: launch/slurm: _step_signal: Terminating StepId=13443142.0
|
945 |
+
slurmstepd: error: *** STEP 13443142.0 ON ip-26-0-160-100 CANCELLED AT 2024-12-14T10:27:35 ***
|
946 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
947 |
+
[2024-12-14 10:27:35,015] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
948 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 22795 closing signal SIGTERM
|
949 |
+
[2024-12-14 10:27:35,015] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 83008 closing signal SIGTERM
|
950 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 22796 closing signal SIGTERM
|
951 |
+
[2024-12-14 10:27:35,015] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 83009 closing signal SIGTERM
|
952 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 22797 closing signal SIGTERM
|
953 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 22798 closing signal SIGTERM
|
954 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 83010 closing signal SIGTERM
|
955 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
956 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 83011 closing signal SIGTERM
|
957 |
+
[2024-12-14 10:27:35,017] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 83012 closing signal SIGTERM
|
958 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 112079 closing signal SIGTERM
|
959 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 112080 closing signal SIGTERM
|
960 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 112081 closing signal SIGTERM
|
961 |
+
[2024-12-14 10:27:35,016] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 112082 closing signal SIGTERM
|
962 |
+
[2024-12-14 10:27:35,018] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 83013 closing signal SIGTERM
|
963 |
+
[2024-12-14 10:27:35,018] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 83014 closing signal SIGTERM
|
964 |
+
[2024-12-14 10:27:35,018] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 83015 closing signal SIGTERM
|
965 |
+
[2024-12-14 10:27:35,018] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 22799 closing signal SIGTERM
|
966 |
+
[2024-12-14 10:27:35,017] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 112083 closing signal SIGTERM
|
967 |
+
[2024-12-14 10:27:35,018] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 112084 closing signal SIGTERM
|
968 |
+
[2024-12-14 10:27:35,019] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 22800 closing signal SIGTERM
|
969 |
+
[2024-12-14 10:27:35,019] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 22801 closing signal SIGTERM
|
970 |
+
[2024-12-14 10:27:35,019] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 112085 closing signal SIGTERM
|
971 |
+
[2024-12-14 10:27:35,019] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 22802 closing signal SIGTERM
|
972 |
+
[2024-12-14 10:27:35,019] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 112086 closing signal SIGTERM
|
973 |
+
Traceback (most recent call last):
|
974 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
975 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
976 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
977 |
+
return f(*args, **kwargs)
|
978 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
979 |
+
run(args)
|
980 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
981 |
+
elastic_launch(
|
982 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
983 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
984 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 255, in launch_agent
|
985 |
+
result = agent.run()
|
986 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py", line 124, in wrapper
|
987 |
+
result = f(*args, **kwargs)
|
988 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 736, in run
|
989 |
+
result = self._invoke_run(role)
|
990 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 877, in _invoke_run
|
991 |
+
time.sleep(monitor_interval)
|
992 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 62, in _terminate_process_handler
|
993 |
+
raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)
|
994 |
+
torch.distributed.elastic.multiprocessing.api.SignalException: Process 22722 got signal: 15
|
995 |
+
srun: error: ip-26-0-160-100: task 0: Exited with exit code 1
|
996 |
+
Traceback (most recent call last):
|
997 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
998 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
999 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
1000 |
+
return f(*args, **kwargs)
|
1001 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
1002 |
+
run(args)
|
1003 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
1004 |
+
elastic_launch(
|
1005 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
1006 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
1007 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 255, in launch_agent
|
1008 |
+
result = agent.run()
|
1009 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py", line 124, in wrapper
|
1010 |
+
result = f(*args, **kwargs)
|
1011 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 736, in run
|
1012 |
+
result = self._invoke_run(role)
|
1013 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 877, in _invoke_run
|
1014 |
+
time.sleep(monitor_interval)
|
1015 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 62, in _terminate_process_handler
|
1016 |
+
raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)
|
1017 |
+
torch.distributed.elastic.multiprocessing.api.SignalException: Process 82938 got signal: 15
|
1018 |
+
srun: error: ip-26-0-162-180: task 3: Exited with exit code 1
|
1019 |
+
[2024-12-14 10:27:59,331] torch.distributed.elastic.rendezvous.dynamic_rendezvous: [WARNING] The node 'ip-26-0-161-78.ec2.internal_112009_0' has failed to send a keep-alive heartbeat to the rendezvous '13443142' due to an error of type RendezvousConnectionError.
|
1020 |
+
Traceback (most recent call last):
|
1021 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
1022 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
1023 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
1024 |
+
return f(*args, **kwargs)
|
1025 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
1026 |
+
run(args)
|
1027 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
1028 |
+
elastic_launch(
|
1029 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
1030 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
1031 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 255, in launch_agent
|
1032 |
+
result = agent.run()
|
1033 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py", line 124, in wrapper
|
1034 |
+
result = f(*args, **kwargs)
|
1035 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 736, in run
|
1036 |
+
result = self._invoke_run(role)
|
1037 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 877, in _invoke_run
|
1038 |
+
time.sleep(monitor_interval)
|
1039 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 62, in _terminate_process_handler
|
1040 |
+
raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)
|
1041 |
+
torch.distributed.elastic.multiprocessing.api.SignalException: Process 112009 got signal: 15
|
1042 |
+
srun: error: ip-26-0-161-78: task 2: Exited with exit code 1
|
logs/13443224-bench_469G_dp1_tp4_pp4_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.out
ADDED
@@ -0,0 +1,1008 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
57 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
58 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
59 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
60 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
61 |
+
test 0;'
|
62 |
+
+++ _mlret=0
|
63 |
+
+++ '[' -n x ']'
|
64 |
+
+++ IFS='
|
65 |
+
'
|
66 |
+
+++ unset _mlIFS
|
67 |
+
+++ unset _mlre _mlv _mlrv
|
68 |
+
+++ '[' 0 -eq 0 ']'
|
69 |
+
+++ eval 'module() {
|
70 |
+
unset _mlshdbg;
|
71 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
72 |
+
case "$-" in
|
73 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
74 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
75 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
76 |
+
*) _mlshdbg='\'''\'' ;;
|
77 |
+
esac;
|
78 |
+
fi;
|
79 |
+
unset _mlre _mlIFS;
|
80 |
+
if [ -n "${IFS+x}" ]; then
|
81 |
+
_mlIFS=$IFS;
|
82 |
+
fi;
|
83 |
+
IFS='\'' '\'';
|
84 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
85 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
86 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
87 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
88 |
+
fi;
|
89 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
90 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
91 |
+
fi;
|
92 |
+
done;
|
93 |
+
if [ -n "${_mlre:-}" ]; then
|
94 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
95 |
+
else
|
96 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
97 |
+
fi;
|
98 |
+
_mlstatus=$?;
|
99 |
+
if [ -n "${_mlIFS+x}" ]; then
|
100 |
+
IFS=$_mlIFS;
|
101 |
+
else
|
102 |
+
unset IFS;
|
103 |
+
fi;
|
104 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
105 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
106 |
+
set -$_mlshdbg;
|
107 |
+
fi;
|
108 |
+
unset _mlshdbg;
|
109 |
+
return $_mlstatus;
|
110 |
+
};
|
111 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
112 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
113 |
+
MODULEPATH_modshare=/etc/environment-modules/modules:1:/usr/share/modules/\$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1; export MODULEPATH_modshare;
|
114 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
115 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
116 |
+
LOADEDMODULES=; export LOADEDMODULES;
|
117 |
+
MODULEPATH=/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/\$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles; export MODULEPATH;
|
118 |
+
test 0;'
|
119 |
+
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
|
120 |
+
++++ export MODULES_CMD
|
121 |
+
++++ ENV=/usr/share/modules/init/profile.sh
|
122 |
+
++++ export ENV
|
123 |
+
++++ MODULEPATH_modshare='/etc/environment-modules/modules:1:/usr/share/modules/$MODULE_VERSION/modulefiles:1:/usr/share/modules/modulefiles:1:/usr/share/modules/versions:1'
|
124 |
+
++++ export MODULEPATH_modshare
|
125 |
+
++++ BASH_ENV=/usr/share/modules/init/bash
|
126 |
+
++++ export BASH_ENV
|
127 |
+
++++ MODULESHOME=/usr/share/modules
|
128 |
+
++++ export MODULESHOME
|
129 |
+
++++ LOADEDMODULES=
|
130 |
+
++++ export LOADEDMODULES
|
131 |
+
++++ MODULEPATH='/etc/environment-modules/modules:/usr/share/modules/versions:/usr/share/modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles'
|
132 |
+
++++ export MODULEPATH
|
133 |
+
++++ test 0
|
134 |
+
+++ '[' 0 = 1 ']'
|
135 |
+
+++ '[' -t 2 ']'
|
136 |
+
+++ export -f module
|
137 |
+
+++ export -f switchml
|
138 |
+
+++ '[' 5 -ge 3 ']'
|
139 |
+
+++ [[ ehxB =~ i ]]
|
140 |
+
+++ [[ ! :/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin: =~ :/usr/bin: ]]
|
141 |
+
+++ '[' '!' -n '' ']'
|
142 |
+
+++ MANPATH=:
|
143 |
+
+++ export MANPATH
|
144 |
+
++++ manpath
|
145 |
+
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/opt/slurm/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
|
146 |
+
+++ unset _mlcode _mlret
|
147 |
+
+++ '[' -n '' ']'
|
148 |
+
+ module load cuda/12.1
|
149 |
+
+ unset _mlshdbg
|
150 |
+
+ '[' 0 = 1 ']'
|
151 |
+
+ unset _mlre _mlIFS
|
152 |
+
+ '[' -n x ']'
|
153 |
+
+ _mlIFS='
|
154 |
+
'
|
155 |
+
+ IFS=' '
|
156 |
+
+ '[' -n '' ']'
|
157 |
+
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
|
158 |
+
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
|
159 |
+
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:;' export 'LD_LIBRARY_PATH;
|
160 |
+
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
|
161 |
+
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
|
162 |
+
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
|
163 |
+
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
|
164 |
+
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
|
165 |
+
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
|
166 |
+
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
|
167 |
+
NCCL_PROTO=simple;' export 'NCCL_PROTO;
|
168 |
+
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
|
169 |
+
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
|
170 |
+
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
|
171 |
+
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
|
172 |
+
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
|
173 |
+
FI_PROVIDER=efa;' export 'FI_PROVIDER;
|
174 |
+
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
|
175 |
+
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
|
176 |
+
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1;' export 'LD_LIBRARY_PATH_modshare;
|
177 |
+
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
|
178 |
+
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
|
179 |
+
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
|
180 |
+
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
|
181 |
+
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin;' export 'PATH;
|
182 |
+
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
|
183 |
+
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
|
184 |
+
test' '0;'
|
185 |
+
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
|
186 |
+
++ export CPATH
|
187 |
+
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:
|
188 |
+
++ export LD_LIBRARY_PATH
|
189 |
+
++ FI_EFA_FORK_SAFE=1
|
190 |
+
++ export FI_EFA_FORK_SAFE
|
191 |
+
++ MANPATH=/usr/local/cuda-12.1/share/man::
|
192 |
+
++ export MANPATH
|
193 |
+
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
|
194 |
+
++ export LIBRARY_PATH
|
195 |
+
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
|
196 |
+
++ export _LMFILES_
|
197 |
+
++ LOADEDMODULES=cuda/12.1
|
198 |
+
++ export LOADEDMODULES
|
199 |
+
++ MPI_PATH=/opt/amazon/openmpi
|
200 |
+
++ export MPI_PATH
|
201 |
+
++ NCCL_HOME_modshare=/opt/nccl/build:1
|
202 |
+
++ export NCCL_HOME_modshare
|
203 |
+
++ NCCL_PROTO=simple
|
204 |
+
++ export NCCL_PROTO
|
205 |
+
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
|
206 |
+
++ export MANPATH_modshare
|
207 |
+
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
|
208 |
+
++ export LIBRARY_PATH_modshare
|
209 |
+
++ NCCL_SOCKET_IFNAME=enp
|
210 |
+
++ export NCCL_SOCKET_IFNAME
|
211 |
+
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
|
212 |
+
++ export AWS_OFI_NCCL_HOME
|
213 |
+
++ NCCL_HOME=/opt/nccl/build
|
214 |
+
++ export NCCL_HOME
|
215 |
+
++ FI_PROVIDER=efa
|
216 |
+
++ export FI_PROVIDER
|
217 |
+
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
|
218 |
+
++ export AWS_OFI_NCCL_HOME_modshare
|
219 |
+
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
|
220 |
+
++ export CPATH_modshare
|
221 |
+
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2::1:/opt/nccl/build/lib:1:/opt/aws-ofi-nccl/lib:2:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/efa/lib:1
|
222 |
+
++ export LD_LIBRARY_PATH_modshare
|
223 |
+
++ FI_EFA_ENABLE_SHM_TRANSFER=1
|
224 |
+
++ export FI_EFA_ENABLE_SHM_TRANSFER
|
225 |
+
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
|
226 |
+
++ export _LMFILES__modshare
|
227 |
+
++ LOADEDMODULES_modshare=cuda/12.1:1
|
228 |
+
++ export LOADEDMODULES_modshare
|
229 |
+
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
|
230 |
+
++ export MPI_PATH_modshare
|
231 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
232 |
+
++ export PATH
|
233 |
+
++ CUDA_HOME=/usr/local/cuda-12.1
|
234 |
+
++ export CUDA_HOME
|
235 |
+
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
|
236 |
+
++ export PATH_modshare
|
237 |
+
++ test 0
|
238 |
+
+ _mlstatus=0
|
239 |
+
+ '[' -n x ']'
|
240 |
+
+ IFS='
|
241 |
+
'
|
242 |
+
+ unset _mlre _mlv _mlrv _mlIFS
|
243 |
+
+ '[' -n '' ']'
|
244 |
+
+ unset _mlshdbg
|
245 |
+
+ return 0
|
246 |
+
+ source /fsx/nouamane/miniconda/bin/activate
|
247 |
+
++ _CONDA_ROOT=/fsx/nouamane/miniconda
|
248 |
+
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
|
249 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
250 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
251 |
+
+++ export _CE_M=
|
252 |
+
+++ _CE_M=
|
253 |
+
+++ export _CE_CONDA=
|
254 |
+
+++ _CE_CONDA=
|
255 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
256 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
257 |
+
+++ '[' -z x ']'
|
258 |
+
++ conda activate
|
259 |
+
++ local cmd=activate
|
260 |
+
++ case "$cmd" in
|
261 |
+
++ __conda_activate activate
|
262 |
+
++ '[' -n '' ']'
|
263 |
+
++ local ask_conda
|
264 |
+
+++ PS1=
|
265 |
+
+++ __conda_exe shell.posix activate
|
266 |
+
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
|
267 |
+
++ ask_conda='. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
268 |
+
PS1='\''(base) '\''
|
269 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
270 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
271 |
+
export CONDA_SHLVL='\''3'\''
|
272 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
273 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
274 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
275 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
276 |
+
export _CE_M='\'''\''
|
277 |
+
export _CE_CONDA='\'''\''
|
278 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
279 |
+
++ eval '. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh"
|
280 |
+
PS1='\''(base) '\''
|
281 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
282 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda'\''
|
283 |
+
export CONDA_SHLVL='\''3'\''
|
284 |
+
export CONDA_DEFAULT_ENV='\''base'\''
|
285 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
286 |
+
export CONDA_PREFIX_2='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
287 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
288 |
+
export _CE_M='\'''\''
|
289 |
+
export _CE_CONDA='\'''\''
|
290 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\'''
|
291 |
+
+++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/deactivate.d/libxml2_deactivate.sh
|
292 |
+
++++ test -n ''
|
293 |
+
++++ unset XML_CATALOG_FILES
|
294 |
+
++++ unset xml_catalog_files_libxml2
|
295 |
+
+++ PS1='(base) '
|
296 |
+
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
297 |
+
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
298 |
+
+++ export CONDA_PREFIX=/fsx/nouamane/miniconda
|
299 |
+
+++ CONDA_PREFIX=/fsx/nouamane/miniconda
|
300 |
+
+++ export CONDA_SHLVL=3
|
301 |
+
+++ CONDA_SHLVL=3
|
302 |
+
+++ export CONDA_DEFAULT_ENV=base
|
303 |
+
+++ CONDA_DEFAULT_ENV=base
|
304 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
305 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
306 |
+
+++ export CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
307 |
+
+++ CONDA_PREFIX_2=/fsx/nouamane/miniconda/envs/2-1-cu121
|
308 |
+
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
309 |
+
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
310 |
+
+++ export _CE_M=
|
311 |
+
+++ _CE_M=
|
312 |
+
+++ export _CE_CONDA=
|
313 |
+
+++ _CE_CONDA=
|
314 |
+
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
315 |
+
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
316 |
+
++ __conda_hashr
|
317 |
+
++ '[' -n '' ']'
|
318 |
+
++ '[' -n '' ']'
|
319 |
+
++ hash -r
|
320 |
+
+ conda activate 2-1-cu121
|
321 |
+
+ local cmd=activate
|
322 |
+
+ case "$cmd" in
|
323 |
+
+ __conda_activate activate 2-1-cu121
|
324 |
+
+ '[' -n '' ']'
|
325 |
+
+ local ask_conda
|
326 |
+
++ PS1='(base) '
|
327 |
+
++ __conda_exe shell.posix activate 2-1-cu121
|
328 |
+
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
|
329 |
+
+ ask_conda='PS1='\''(2-1-cu121) '\''
|
330 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
331 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
332 |
+
export CONDA_SHLVL='\''4'\''
|
333 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
334 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
335 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
336 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
337 |
+
export _CE_M='\'''\''
|
338 |
+
export _CE_CONDA='\'''\''
|
339 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
340 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
341 |
+
+ eval 'PS1='\''(2-1-cu121) '\''
|
342 |
+
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin'\''
|
343 |
+
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
|
344 |
+
export CONDA_SHLVL='\''4'\''
|
345 |
+
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
|
346 |
+
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
|
347 |
+
export CONDA_PREFIX_3='\''/fsx/nouamane/miniconda'\''
|
348 |
+
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
|
349 |
+
export _CE_M='\'''\''
|
350 |
+
export _CE_CONDA='\'''\''
|
351 |
+
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
|
352 |
+
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
|
353 |
+
++ PS1='(2-1-cu121) '
|
354 |
+
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
355 |
+
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
356 |
+
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
357 |
+
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
|
358 |
+
++ export CONDA_SHLVL=4
|
359 |
+
++ CONDA_SHLVL=4
|
360 |
+
++ export CONDA_DEFAULT_ENV=2-1-cu121
|
361 |
+
++ CONDA_DEFAULT_ENV=2-1-cu121
|
362 |
+
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
|
363 |
+
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
|
364 |
+
++ export CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
365 |
+
++ CONDA_PREFIX_3=/fsx/nouamane/miniconda
|
366 |
+
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
367 |
+
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
|
368 |
+
++ export _CE_M=
|
369 |
+
++ _CE_M=
|
370 |
+
++ export _CE_CONDA=
|
371 |
+
++ _CE_CONDA=
|
372 |
+
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
373 |
+
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
|
374 |
+
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
|
375 |
+
+++ test -n ''
|
376 |
+
+++ xml_catalog_files_libxml2=
|
377 |
+
+++ XML_CATALOG_FILES=
|
378 |
+
+++ conda_catalog_files=
|
379 |
+
+++ ifs_libxml2='
|
380 |
+
'
|
381 |
+
+++ IFS=' '
|
382 |
+
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
|
383 |
+
+++ for pre in ${rem}
|
384 |
+
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
|
385 |
+
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
|
386 |
+
+++ rem=
|
387 |
+
+++ IFS='
|
388 |
+
'
|
389 |
+
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
390 |
+
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
391 |
+
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
|
392 |
+
+++ unset conda_catalog_files ifs_libxml2 rem
|
393 |
+
+ __conda_hashr
|
394 |
+
+ '[' -n '' ']'
|
395 |
+
+ '[' -n '' ']'
|
396 |
+
+ hash -r
|
397 |
+
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
398 |
+
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/opt/slurm/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin:/admin/home/nouamane/.cursor-server/bin/001668006cc714afd397f4ef0d52862f5a095530/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin
|
++ scontrol show hostnames 'ip-26-0-161-[78,103]'
+ export 'NODELIST=ip-26-0-161-78
ip-26-0-161-103'
+ NODELIST='ip-26-0-161-78
ip-26-0-161-103'
++ head -n1
++ scontrol show hostnames 'ip-26-0-161-[78,103]'
+ export MASTER_NODE=ip-26-0-161-78
+ MASTER_NODE=ip-26-0-161-78
+ export MASTER_PORT=12356
+ MASTER_PORT=12356
+ export NNODES=2
+ NNODES=2
+ export GPUS_PER_NODE=8
+ GPUS_PER_NODE=8
+ export WORLD_SIZE=16
+ WORLD_SIZE=16
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ CUDA_DEVICE_MAX_CONNECTIONS=1
+ export NCCL_DEBUG=WARN
+ NCCL_DEBUG=WARN
+ export NANOTRON_BENCHMARK=1
+ NANOTRON_BENCHMARK=1
+ export WANDB_MODE=disabled
+ WANDB_MODE=disabled
+ export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+ TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+ echo '=== GPU Topology ==='
=== GPU Topology ===
+ nvidia-smi topo -m
        GPU0    GPU1    GPU2    GPU3    GPU4    GPU5    GPU6    GPU7    CPU Affinity    NUMA Affinity   GPU NUMA ID
GPU0     X      NV18    NV18    NV18    NV18    NV18    NV18    NV18    0-47    0       N/A
GPU1    NV18     X      NV18    NV18    NV18    NV18    NV18    NV18    0-47    0       N/A
GPU2    NV18    NV18     X      NV18    NV18    NV18    NV18    NV18    0-47    0       N/A
GPU3    NV18    NV18    NV18     X      NV18    NV18    NV18    NV18    0-47    0       N/A
GPU4    NV18    NV18    NV18    NV18     X      NV18    NV18    NV18    48-95   1       N/A
GPU5    NV18    NV18    NV18    NV18    NV18     X      NV18    NV18    48-95   1       N/A
GPU6    NV18    NV18    NV18    NV18    NV18    NV18     X      NV18    48-95   1       N/A
GPU7    NV18    NV18    NV18    NV18    NV18    NV18    NV18     X      48-95   1       N/A

Legend:

  X    = Self
  SYS  = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)
  NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node
  PHB  = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)
  PXB  = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)
  PIX  = Connection traversing at most a single PCIe bridge
  NV#  = Connection traversing a bonded set of # NVLinks
+ echo ==================
==================
+ echo 'Master node: ip-26-0-161-78'
Master node: ip-26-0-161-78
+ echo 'All nodes: ip-26-0-161-78
ip-26-0-161-103'
All nodes: ip-26-0-161-78
ip-26-0-161-103
+ echo 'World size: 16'
World size: 16
+ srun --wait=0 --kill-on-bad-exit=1 torchrun --nnodes=2 --nproc_per_node=8 --rdzv_id=13443224 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-161-78:12356 --max_restarts 0 --rdzv_conf timeout=60 /fsx/nouamane/projects/nanotron/run_train.py --config-file benchmark/configs/config_469G_dp1_tp4_pp4_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml
[2024-12-14 13:18:17,743] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-14 13:18:17,743] torch.distributed.run: [WARNING]
[2024-12-14 13:18:17,743] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 13:18:17,743] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-14 13:18:17,743] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 13:18:17,841] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-14 13:18:17,841] torch.distributed.run: [WARNING]
[2024-12-14 13:18:17,841] torch.distributed.run: [WARNING] *****************************************
[2024-12-14 13:18:17,841] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-14 13:18:17,841] torch.distributed.run: [WARNING] *****************************************
NCCL version 2.18.5+cuda12.2
12/14/2024 13:18:39 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Measuring inter-GPU and intra-node bandwidth...
NCCL version 2.18.5+cuda12.2
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Bandwidth measurement complete. Time taken: 15.44 seconds
473 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Config:
|
474 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Config(general=GeneralArgs(project='debug',
|
475 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: run='469G_dp1_tp4_pp4_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k',
|
476 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: seed=42,
|
477 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: step=None,
|
478 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: consumed_train_samples=None,
|
479 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: benchmark_csv_path=PosixPath('benchmark/results/bench_final2.csv'),
|
480 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: ignore_sanity_checks=True),
|
481 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: parallelism=ParallelismArgs(dp=1,
|
482 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: pp=4,
|
483 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: tp=4,
|
484 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f397709cb50>,
|
485 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
|
486 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: tp_linear_async_communication=True,
|
487 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: recompute_layer=False,
|
488 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: tp_recompute_allgather=True,
|
489 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: expert_parallel_size=1),
|
490 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
|
491 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: eos_token_id=0,
|
492 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: hidden_act='silu',
|
493 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: hidden_size=16384,
|
494 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: initializer_range=0.02,
|
495 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: intermediate_size=53248,
|
496 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: is_llama_config=True,
|
497 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: max_position_embeddings=4096,
|
498 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: num_attention_heads=128,
|
499 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: num_hidden_layers=126,
|
500 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: num_key_value_heads=128,
|
501 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: pad_token_id=None,
|
502 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: pretraining_tp=1,
|
503 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: rms_norm_eps=1e-05,
|
504 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: rope_scaling=None,
|
505 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: rope_theta=10000.0,
|
506 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: rope_interleaved=False,
|
507 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: tie_word_embeddings=False,
|
508 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: use_cache=True,
|
509 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: vocab_size=131072),
|
510 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: init_method=RandomInit(std=0.02),
|
511 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: dtype=torch.bfloat16,
|
512 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: make_vocab_size_divisible_by=1,
|
513 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: ddp_bucket_cap_mb=25),
|
514 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
|
515 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: tokenizer_revision=None,
|
516 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: tokenizer_max_length=None),
|
517 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
|
518 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: checkpoint_interval=10000,
|
519 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: save_initial_state=False,
|
520 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: save_final_state=False,
|
521 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: resume_checkpoint_path=None,
|
522 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: checkpoints_path_is_shared_file_system=False),
|
523 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: logging=LoggingArgs(log_level='info',
|
524 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: log_level_replica='info',
|
525 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: iteration_step_info_interval=1),
|
526 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: tokens=TokensArgs(sequence_length=4096,
|
527 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: train_steps=100,
|
528 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: micro_batch_size=32,
|
529 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: batch_accumulation_per_replica=8,
|
530 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: val_check_interval=100,
|
531 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: limit_val_batches=0,
|
532 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: limit_test_batches=0),
|
533 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
534 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: adam_beta1=0.9,
|
535 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: adam_beta2=0.95,
|
536 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: torch_adam_is_fused=True,
|
537 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: name='adamW'),
|
538 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: zero_stage=0,
|
539 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: weight_decay=0.01,
|
540 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: clip_grad=1.0,
|
541 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: accumulate_grad_in_fp32=True,
|
542 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
|
543 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: lr_warmup_steps=2,
|
544 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: lr_warmup_style='linear',
|
545 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: lr_decay_style='cosine',
|
546 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: lr_decay_steps=13,
|
547 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: lr_decay_starting_step=None,
|
548 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: min_decay_lr=1e-05)),
|
549 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
|
550 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: start_training_step=1,
|
551 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: data=DataArgs(dataset=None,
|
552 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: seed=42,
|
553 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: num_loading_workers=1))],
|
554 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: profiler=None,
|
555 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: lighteval=None,
|
556 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: s3_upload=None)
|
557 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Model Config:
|
558 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: LlamaConfig(bos_token_id=0,
|
559 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: eos_token_id=0,
|
560 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: hidden_act='silu',
|
561 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: hidden_size=16384,
|
562 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: initializer_range=0.02,
|
563 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: intermediate_size=53248,
|
564 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: is_llama_config=True,
|
565 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: max_position_embeddings=4096,
|
566 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: num_attention_heads=128,
|
567 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: num_hidden_layers=126,
|
568 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: num_key_value_heads=128,
|
569 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: pad_token_id=None,
|
570 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: pretraining_tp=1,
|
571 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: rms_norm_eps=1e-05,
|
572 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: rope_scaling=None,
|
573 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: rope_theta=10000.0,
|
574 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: rope_interleaved=False,
|
575 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: tie_word_embeddings=False,
|
576 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: use_cache=True,
|
577 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: vocab_size=131072)
|
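A quick back-of-the-envelope check of the LlamaConfig printed above: with hidden_size=16384, intermediate_size=53248, 126 layers, 128 query and key/value heads, and untied 131072-token embeddings, the model comes out to roughly 469G parameters, which is consistent with the total reported after model building below. The following is only a sanity-check sketch using the config values from this log (norm weights ignored), not nanotron's own parameter counting:
hidden, ffn, layers, vocab = 16384, 53248, 126, 131072
attn_per_layer = 4 * hidden * hidden       # q/k/v/o projections (128 query heads == 128 kv heads)
mlp_per_layer = 3 * hidden * ffn           # gate, up and down projections
embeddings = 2 * vocab * hidden            # input embeddings plus untied lm_head
total = layers * (attn_per_layer + mlp_per_layer) + embeddings
print(f"{total / 1e9:.0f}G params")        # ~469G, consistent with "Total number of parameters: 469G"
print(f"{total * 2 / 2**20:.0f} MiB bf16") # ~895232 MiB, close to the reported 895263.62MiB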
578 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Building model..
|
579 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Initialize RoPE Theta = 10000.0
|
580 |
+
12/14/2024 13:18:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Setting PP block ranks...
|
581 |
+
NCCL version 2.18.5+cuda12.2
|
582 |
+
NCCL version 2.18.5+cuda12.2
|
583 |
+
NCCL version 2.18.5+cuda12.2
|
584 |
+
NCCL version 2.18.5+cuda12.2
|
585 |
+
NCCL version 2.18.5+cuda12.2
|
586 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=1|TP=3|ip-26-0-161-103]: Local number of parameters: 29.5G (56322.00MiB)
|
587 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=3|ip-26-0-161-103]: Local number of parameters: 30.1G (57346.00MiB)
|
588 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Total number of parameters: 469G (895263.62MiB)
|
589 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Local number of parameters: 30.1G (57346.00MiB)
|
590 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=1|TP=0|ip-26-0-161-103]: Local number of parameters: 29.5G (56322.00MiB)
|
591 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=1|TP=3|ip-26-0-161-103]: [After model building] Memory usage: 56322.04MiB. Peak allocated: 56322.05MiB Peak reserved: 57828.00MiB
|
592 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=1|ip-26-0-161-103]: Local number of parameters: 30.1G (57346.00MiB)
|
593 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=1|TP=1|ip-26-0-161-103]: Local number of parameters: 29.5G (56322.00MiB)
|
594 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=3|TP=1|ip-26-0-161-78]: Local number of parameters: 29.1G (55585.97MiB)
|
595 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=2|TP=1|ip-26-0-161-78]: Local number of parameters: 28.6G (54561.94MiB)
|
596 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=3|ip-26-0-161-103]: [After model building] Memory usage: 57346.04MiB. Peak allocated: 57346.05MiB Peak reserved: 58852.00MiB
|
597 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=1|TP=2|ip-26-0-161-103]: Local number of parameters: 29.5G (56322.00MiB)
|
598 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=3|TP=2|ip-26-0-161-78]: Local number of parameters: 29.1G (55585.97MiB)
|
599 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=2|TP=2|ip-26-0-161-78]: Local number of parameters: 28.6G (54561.94MiB)
|
600 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: [After model building] Memory usage: 57346.04MiB. Peak allocated: 57346.05MiB Peak reserved: 58756.00MiB
|
601 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=2|TP=1|ip-26-0-161-78]: [After model building] Memory usage: 54561.98MiB. Peak allocated: 54561.98MiB Peak reserved: 56450.00MiB
|
602 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=3|TP=1|ip-26-0-161-78]: [After model building] Memory usage: 55586.01MiB. Peak allocated: 55586.02MiB Peak reserved: 57476.00MiB
|
603 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=1|ip-26-0-161-103]: [After model building] Memory usage: 57346.04MiB. Peak allocated: 57346.05MiB Peak reserved: 58692.00MiB
|
604 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=1|TP=0|ip-26-0-161-103]: [After model building] Memory usage: 56322.04MiB. Peak allocated: 56322.05MiB Peak reserved: 57988.00MiB
|
605 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=1|TP=1|ip-26-0-161-103]: [After model building] Memory usage: 56322.04MiB. Peak allocated: 56322.05MiB Peak reserved: 57828.00MiB
|
606 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=3|TP=3|ip-26-0-161-78]: Local number of parameters: 29.1G (55585.97MiB)
|
607 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=2|ip-26-0-161-103]: Local number of parameters: 30.1G (57346.00MiB)
|
608 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=2|TP=3|ip-26-0-161-78]: Local number of parameters: 28.6G (54561.94MiB)
|
609 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=2|TP=2|ip-26-0-161-78]: [After model building] Memory usage: 54561.98MiB. Peak allocated: 54561.98MiB Peak reserved: 56450.00MiB
|
610 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=3|TP=2|ip-26-0-161-78]: [After model building] Memory usage: 55586.01MiB. Peak allocated: 55586.02MiB Peak reserved: 57476.00MiB
|
611 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=1|TP=2|ip-26-0-161-103]: [After model building] Memory usage: 56322.04MiB. Peak allocated: 56322.05MiB Peak reserved: 57892.00MiB
|
612 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=3|TP=3|ip-26-0-161-78]: [After model building] Memory usage: 55586.01MiB. Peak allocated: 55586.02MiB Peak reserved: 57476.00MiB
|
613 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=2|TP=3|ip-26-0-161-78]: [After model building] Memory usage: 54561.98MiB. Peak allocated: 54561.98MiB Peak reserved: 56450.00MiB
|
614 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=2|ip-26-0-161-103]: [After model building] Memory usage: 57346.04MiB. Peak allocated: 57346.05MiB Peak reserved: 58852.00MiB
|
615 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: No checkpoint path provided.
|
616 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: Parametrizing model parameters using StandardParametrizator
|
617 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=2|TP=0|ip-26-0-161-78]: Local number of parameters: 28.6G (54561.94MiB)
|
618 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=3|TP=0|ip-26-0-161-78]: Local number of parameters: 29.1G (55585.97MiB)
|
619 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=2|TP=0|ip-26-0-161-78]: [After model building] Memory usage: 54561.98MiB. Peak allocated: 54561.98MiB Peak reserved: 56450.00MiB
|
620 |
+
12/14/2024 13:19:02 [INFO|DP=0|PP=3|TP=0|ip-26-0-161-78]: [After model building] Memory usage: 55586.01MiB. Peak allocated: 55586.02MiB Peak reserved: 57476.00MiB
|
621 |
+
NCCL version 2.18.5+cuda12.2
|
622 |
+
NCCL version 2.18.5+cuda12.2
|
623 |
+
NCCL version 2.18.5+cuda12.2
|
624 |
+
NCCL version 2.18.5+cuda12.2
|
625 |
+
NCCL version 2.18.5+cuda12.2
|
626 |
+
NCCL version 2.18.5+cuda12.2
|
627 |
+
NCCL version 2.18.5+cuda12.2
|
628 |
+
NCCL version 2.18.5+cuda12.2
|
629 |
+
NCCL version 2.18.5+cuda12.2
|
630 |
+
12/14/2024 13:19:03 [INFO|DP=0|PP=0|TP=0|ip-26-0-161-103]: [Optimizer Building] Using LearningRateForSP as learning rate
|
631 |
+
Traceback (most recent call last):
|
632 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
633 |
+
trainer = DistributedTrainer(config_file)
|
634 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
635 |
+
Traceback (most recent call last):
|
636 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
637 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
638 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
639 |
+
Traceback (most recent call last):
|
640 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
641 |
+
trainer = DistributedTrainer(config_file)
|
642 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
643 |
+
trainer = DistributedTrainer(config_file)
|
644 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
645 |
+
optimizer = optimizer_builder(named_param_groups)
|
646 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
647 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
648 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
649 |
+
Traceback (most recent call last):
|
650 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
651 |
+
result = OptimizerFromGradientAccumulator(
|
652 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
653 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
654 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
655 |
+
optimizer = optimizer_builder(named_param_groups)
|
656 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
657 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
optimizer = optimizer_builder(named_param_groups)
|
658 |
+
|
659 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
660 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
661 |
+
result = OptimizerFromGradientAccumulator(
|
662 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
663 |
+
trainer = DistributedTrainer(config_file)
|
664 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
665 |
+
result = OptimizerFromGradientAccumulator(
|
666 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
667 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
668 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
669 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
670 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
671 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
672 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
673 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
674 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
675 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
676 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
677 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
678 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
679 |
+
|
680 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
681 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
682 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 108.57 GiB. GPU 7 has a total capacty of 79.33 GiB of which 13.54 GiB is free. Including non-PyTorch memory, this process has 65.78 GiB memory in use. Of the allocated memory 54.28 GiB is allocated by PyTorch, and 1.72 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
683 |
+
optimizer = optimizer_builder(named_param_groups)
|
684 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
685 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
686 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
687 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
688 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
689 |
+
result = OptimizerFromGradientAccumulator(
|
690 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
691 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
692 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 108.57 GiB. GPU 4 has a total capacty of 79.33 GiB of which 13.14 GiB is free. Including non-PyTorch memory, this process has 66.17 GiB memory in use. Of the allocated memory 54.28 GiB is allocated by PyTorch, and 1.72 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
693 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
694 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 108.57 GiB. GPU 6 has a total capacty of 79.33 GiB of which 13.11 GiB is free. Including non-PyTorch memory, this process has 66.21 GiB memory in use. Of the allocated memory 54.28 GiB is allocated by PyTorch, and 1.72 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
695 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
696 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
697 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
698 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
699 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
700 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
701 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
702 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 108.57 GiB. GPU 5 has a total capacty of 79.33 GiB of which 13.11 GiB is free. Including non-PyTorch memory, this process has 66.21 GiB memory in use. Of the allocated memory 54.28 GiB is allocated by PyTorch, and 1.72 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
703 |
+
Traceback (most recent call last):
|
704 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
705 |
+
trainer = DistributedTrainer(config_file)
|
706 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
707 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
708 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
709 |
+
optimizer = optimizer_builder(named_param_groups)
|
710 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
711 |
+
result = OptimizerFromGradientAccumulator(
|
712 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
713 |
+
Traceback (most recent call last):
|
714 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
715 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
716 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
717 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
718 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
719 |
+
trainer = DistributedTrainer(config_file)
|
720 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
721 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
722 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
723 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
724 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
725 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
726 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 106.57 GiB. GPU 0 has a total capacty of 79.33 GiB of which 14.19 GiB is free. Including non-PyTorch memory, this process has 65.12 GiB memory in use. Of the allocated memory 53.28 GiB is allocated by PyTorch, and 1.72 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
Traceback (most recent call last):
|
727 |
+
|
728 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
729 |
+
optimizer = optimizer_builder(named_param_groups)
|
730 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
731 |
+
Traceback (most recent call last):
|
732 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
733 |
+
result = OptimizerFromGradientAccumulator(
|
734 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
735 |
+
trainer = DistributedTrainer(config_file)
|
736 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
737 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
738 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
739 |
+
trainer = DistributedTrainer(config_file)
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
740 |
+
|
741 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
742 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
743 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
744 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
745 |
+
optimizer = optimizer_builder(named_param_groups)
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
746 |
+
|
747 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
748 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
749 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
750 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
751 |
+
result = OptimizerFromGradientAccumulator(
|
752 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
753 |
+
optimizer = optimizer_builder(named_param_groups)
|
754 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
755 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
756 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 106.57 GiB. GPU 3 has a total capacty of 79.33 GiB of which 14.23 GiB is free. Including non-PyTorch memory, this process has 65.09 GiB memory in use. Of the allocated memory 53.28 GiB is allocated by PyTorch, and 1.72 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
757 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
758 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
759 |
+
result = OptimizerFromGradientAccumulator(
|
760 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
761 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
762 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
763 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
764 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
765 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
766 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
767 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
768 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
769 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
770 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 106.57 GiB. GPU 1 has a total capacty of 79.33 GiB of which 14.12 GiB is free. Including non-PyTorch memory, this process has 65.20 GiB memory in use. Of the allocated memory 53.28 GiB is allocated by PyTorch, and 1.72 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
771 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
772 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
773 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
774 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 106.57 GiB. GPU 2 has a total capacty of 79.33 GiB of which 14.12 GiB is free. Including non-PyTorch memory, this process has 65.20 GiB memory in use. Of the allocated memory 53.28 GiB is allocated by PyTorch, and 1.72 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
775 |
+
Traceback (most recent call last):
|
776 |
+
Traceback (most recent call last):
|
777 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
778 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
779 |
+
trainer = DistributedTrainer(config_file)
|
780 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
781 |
+
trainer = DistributedTrainer(config_file)
|
782 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
783 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
784 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
785 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
786 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
787 |
+
optimizer = optimizer_builder(named_param_groups)
|
788 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
789 |
+
optimizer = optimizer_builder(named_param_groups)
|
790 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
791 |
+
result = OptimizerFromGradientAccumulator(
|
792 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
793 |
+
result = OptimizerFromGradientAccumulator(
|
794 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
795 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
796 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
797 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
798 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
799 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
800 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
801 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
802 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
803 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
804 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
805 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
806 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 110.00 GiB. GPU 7 has a total capacty of 79.33 GiB of which 13.20 GiB is free. Including non-PyTorch memory, this process has 66.12 GiB memory in use. Of the allocated memory 55.00 GiB is allocated by PyTorch, and 1.35 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
807 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
808 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
809 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
810 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 110.00 GiB. GPU 6 has a total capacty of 79.33 GiB of which 12.71 GiB is free. Including non-PyTorch memory, this process has 66.61 GiB memory in use. Of the allocated memory 55.00 GiB is allocated by PyTorch, and 1.41 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
811 |
+
Traceback (most recent call last):
|
812 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
813 |
+
trainer = DistributedTrainer(config_file)
|
814 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
815 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
816 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
817 |
+
optimizer = optimizer_builder(named_param_groups)
|
818 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
819 |
+
result = OptimizerFromGradientAccumulator(
|
820 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
821 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
822 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
823 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
824 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
825 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
826 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
827 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
828 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 110.00 GiB. GPU 4 has a total capacty of 79.33 GiB of which 12.64 GiB is free. Including non-PyTorch memory, this process has 66.67 GiB memory in use. Of the allocated memory 55.00 GiB is allocated by PyTorch, and 1.50 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
829 |
+
Traceback (most recent call last):
|
830 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
831 |
+
trainer = DistributedTrainer(config_file)
|
832 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
833 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
834 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
835 |
+
optimizer = optimizer_builder(named_param_groups)
|
836 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
837 |
+
result = OptimizerFromGradientAccumulator(
|
838 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
839 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
840 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
841 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
842 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
843 |
+
Traceback (most recent call last):
|
844 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
845 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
846 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
847 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
848 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 110.00 GiB. GPU 5 has a total capacty of 79.33 GiB of which 12.77 GiB is free. Including non-PyTorch memory, this process has 66.55 GiB memory in use. Of the allocated memory 55.00 GiB is allocated by PyTorch, and 1.35 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
849 |
+
trainer = DistributedTrainer(config_file)
|
850 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
851 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
852 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
853 |
+
optimizer = optimizer_builder(named_param_groups)
|
854 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
855 |
+
result = OptimizerFromGradientAccumulator(
|
856 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
857 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
858 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
859 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
860 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
861 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
862 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
863 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
864 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 GiB. GPU 0 has a total capacty of 79.33 GiB of which 11.94 GiB is free. Including non-PyTorch memory, this process has 67.38 GiB memory in use. Of the allocated memory 56.00 GiB is allocated by PyTorch, and 1.25 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
865 |
+
Traceback (most recent call last):
|
866 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
867 |
+
Traceback (most recent call last):
|
868 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
869 |
+
trainer = DistributedTrainer(config_file)
|
870 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
871 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
872 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
873 |
+
trainer = DistributedTrainer(config_file)
|
874 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
875 |
+
optimizer = optimizer_builder(named_param_groups)
|
876 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
877 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
878 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
879 |
+
result = OptimizerFromGradientAccumulator(
|
880 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
881 |
+
optimizer = optimizer_builder(named_param_groups)
|
882 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
883 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
884 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
885 |
+
result = OptimizerFromGradientAccumulator(
|
886 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
887 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
888 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
889 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
890 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
891 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
892 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
893 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
894 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
895 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
896 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 GiB. GPU 1 has a total capacty of 79.33 GiB of which 11.93 GiB is free. Including non-PyTorch memory, this process has 67.39 GiB memory in use. Of the allocated memory 56.00 GiB is allocated by PyTorch, and 1.19 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
897 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
898 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
899 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")Traceback (most recent call last):
|
900 |
+
|
901 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 248, in <module>
|
902 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 GiB. GPU 2 has a total capacty of 79.33 GiB of which 11.77 GiB is free. Including non-PyTorch memory, this process has 67.55 GiB memory in use. Of the allocated memory 56.00 GiB is allocated by PyTorch, and 1.35 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
903 |
+
trainer = DistributedTrainer(config_file)
|
904 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 183, in __init__
|
905 |
+
self.optimizer, self.grad_accumulator = init_optimizer_and_grad_accumulator(
|
906 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 402, in init_optimizer_and_grad_accumulator
|
907 |
+
optimizer = optimizer_builder(named_param_groups)
|
908 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 364, in grad_optimizer_builder
|
909 |
+
result = OptimizerFromGradientAccumulator(
|
910 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/optimizer_from_gradient_accumulator.py", line 32, in __init__
|
911 |
+
gradient_accumulator = gradient_accumulator_builder(name_to_param.items())
|
912 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/helpers.py", line 365, in <lambda>
|
913 |
+
gradient_accumulator_builder=lambda named_params: FP32GradientAccumulator(
|
914 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 78, in __init__
|
915 |
+
self.fp32_grad_buffers, self._contiguous_fp32_grad_buffer = self.build_grad_buffers(
|
916 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/optim/gradient_accumulator.py", line 173, in build_grad_buffers
|
917 |
+
contiguous_buffer_f32_gradients = torch.zeros(needed_buffer_size, dtype=torch.float, device="cuda")
|
918 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 GiB. GPU 3 has a total capacty of 79.33 GiB of which 11.89 GiB is free. Including non-PyTorch memory, this process has 67.43 GiB memory in use. Of the allocated memory 56.00 GiB is allocated by PyTorch, and 1.35 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
919 |
+
terminate called after throwing an instance of 'c10::Error'
|
920 |
+
what(): CUDA driver error: unknown error
|
921 |
+
Exception raised from _hasPrimaryContext at /opt/conda/conda-bld/pytorch_1699449201336/work/aten/src/ATen/cuda/detail/CUDAHooks.cpp:67 (most recent call first):
|
922 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7fba04ed3617 in /fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/lib/libc10.so)
|
923 |
+
frame #1: c10::detail::torchCheckFail(char const*, char const*, unsigned int, char const*) + 0x68 (0x7fba04e8ea56 in /fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/lib/libc10.so)
|
924 |
+
frame #2: <unknown function> + 0xe7124f (0x7fba05e2f24f in /fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
925 |
+
frame #3: c10::cuda::MaybeSetDevice(int) + 0xc (0x7fba04f904cc in /fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/lib/libc10_cuda.so)
|
926 |
+
frame #4: std::_Sp_counted_ptr_inplace<std::vector<at::cuda::CUDAEvent, std::allocator<at::cuda::CUDAEvent> >, std::allocator<std::vector<at::cuda::CUDAEvent, std::allocator<at::cuda::CUDAEvent> > >, (__gnu_cxx::_Lock_policy)2>::_M_dispose() + 0x98 (0x7fba05f3ec78 in /fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
927 |
+
frame #5: std::_Sp_counted_base<(__gnu_cxx::_Lock_policy)2>::_M_release() + 0x48 (0x7fba4bef1808 in /fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/lib/libtorch_python.so)
|
928 |
+
frame #6: c10d::ProcessGroupNCCL::WorkNCCL::~WorkNCCL() + 0x135 (0x7fba05f09de5 in /fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
929 |
+
frame #7: c10d::ProcessGroupNCCL::workCleanupLoop() + 0x3c5 (0x7fba05f1f715 in /fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
930 |
+
frame #8: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x78 (0x7fba05f1f8a8 in /fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
931 |
+
frame #9: <unknown function> + 0xd3e95 (0x7fba9761be95 in /fsx/nouamane/miniconda/envs/2-1-cu121/bin/../lib/libstdc++.so.6)
|
932 |
+
frame #10: <unknown function> + 0x8609 (0x7fba97a8b609 in /lib/x86_64-linux-gnu/libpthread.so.0)
|
933 |
+
frame #11: clone + 0x43 (0x7fba97854353 in /lib/x86_64-linux-gnu/libc.so.6)
|
934 |
+
|
935 |
+
[2024-12-14 13:19:19,999] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 147915 closing signal SIGTERM
|
936 |
+
[2024-12-14 13:19:19,999] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 147917 closing signal SIGTERM
|
937 |
+
[2024-12-14 13:19:19,999] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 147918 closing signal SIGTERM
|
938 |
+
[2024-12-14 13:19:19,999] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 147919 closing signal SIGTERM
|
939 |
+
[2024-12-14 13:19:19,999] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 147920 closing signal SIGTERM
|
940 |
+
[2024-12-14 13:19:19,999] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 147922 closing signal SIGTERM
|
941 |
+
[2024-12-14 13:19:21,779] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: -6) local_rank: 1 (pid: 147916) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
|
942 |
+
Traceback (most recent call last):
|
943 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
944 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
945 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
946 |
+
return f(*args, **kwargs)
|
947 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
948 |
+
run(args)
|
949 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
950 |
+
elastic_launch(
|
951 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
952 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
953 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
|
954 |
+
raise ChildFailedError(
|
955 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
956 |
+
============================================================
|
957 |
+
/fsx/nouamane/projects/nanotron/run_train.py FAILED
|
958 |
+
------------------------------------------------------------
|
959 |
+
Failures:
|
960 |
+
[1]:
|
961 |
+
time : 2024-12-14_13:19:19
|
962 |
+
host : ip-26-0-161-78.ec2.internal
|
963 |
+
rank : 14 (local_rank: 6)
|
964 |
+
exitcode : 1 (pid: 147921)
|
965 |
+
error_file: <N/A>
|
966 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
967 |
+
------------------------------------------------------------
|
968 |
+
Root Cause (first observed failure):
|
969 |
+
[0]:
|
970 |
+
time : 2024-12-14_13:19:19
|
971 |
+
host : ip-26-0-161-78.ec2.internal
|
972 |
+
rank : 9 (local_rank: 1)
|
973 |
+
exitcode : -6 (pid: 147916)
|
974 |
+
error_file: <N/A>
|
975 |
+
traceback : Signal 6 (SIGABRT) received by PID 147916
|
976 |
+
============================================================
|
977 |
+
srun: error: ip-26-0-161-78: task 0: Exited with exit code 1
|
978 |
+
srun: launch/slurm: _step_signal: Terminating StepId=13443224.0
|
979 |
+
[2024-12-14 13:19:22,094] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
|
980 |
+
[2024-12-14 13:19:22,094] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 28815 closing signal SIGTERM
|
981 |
+
[2024-12-14 13:19:22,094] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 28816 closing signal SIGTERM
|
982 |
+
[2024-12-14 13:19:22,094] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 28817 closing signal SIGTERM
|
983 |
+
[2024-12-14 13:19:22,094] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 28818 closing signal SIGTERM
|
984 |
+
[2024-12-14 13:19:22,094] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 28819 closing signal SIGTERM
|
985 |
+
[2024-12-14 13:19:22,094] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 28820 closing signal SIGTERM
|
986 |
+
Traceback (most recent call last):
|
987 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 33, in <module>
|
988 |
+
sys.exit(load_entry_point('torch==2.1.1', 'console_scripts', 'torchrun')())
|
989 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
|
990 |
+
return f(*args, **kwargs)
|
991 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
|
992 |
+
run(args)
|
993 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
|
994 |
+
elastic_launch(
|
995 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
|
996 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
997 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 255, in launch_agent
|
998 |
+
result = agent.run()
|
999 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py", line 124, in wrapper
|
1000 |
+
result = f(*args, **kwargs)
|
1001 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 736, in run
|
1002 |
+
result = self._invoke_run(role)
|
1003 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 877, in _invoke_run
|
1004 |
+
time.sleep(monitor_interval)
|
1005 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 62, in _terminate_process_handler
|
1006 |
+
raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)
|
1007 |
+
torch.distributed.elastic.multiprocessing.api.SignalException: Process 28742 got signal: 15
|
1008 |
+
srun: error: ip-26-0-161-103: task 1: Exited with exit code 1
|
logs/13458865-bench_1.34G_dp32_tp1_pp2_acc1_mbs8_seq4096_zero0_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff
logs/13458918-bench_3.57G_dp32_tp2_pp2_acc1_mbs8_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff
logs/13460158-bench_3.57G_dp4_tp4_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
@@ -0,0 +1,923 @@
1 |
+
+ source /etc/profile.d/modules.sh
|
2 |
+
++ . /usr/share/modules/init/bash
|
3 |
+
+++ unset _mlshdbg
|
4 |
+
+++ '[' 0 = 1 ']'
|
5 |
+
+++ unset _mlre _mlIFS
|
6 |
+
+++ '[' -n x ']'
|
7 |
+
+++ _mlIFS='
|
8 |
+
'
|
9 |
+
+++ IFS=' '
|
10 |
+
+++ '[' -n '' ']'
|
11 |
+
++++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash autoinit
|
12 |
+
+++ _mlcode='module() {
|
13 |
+
unset _mlshdbg;
|
14 |
+
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
|
15 |
+
case "$-" in
|
16 |
+
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
|
17 |
+
*v*) set +v; _mlshdbg='\''v'\'' ;;
|
18 |
+
*x*) set +x; _mlshdbg='\''x'\'' ;;
|
19 |
+
*) _mlshdbg='\'''\'' ;;
|
20 |
+
esac;
|
21 |
+
fi;
|
22 |
+
unset _mlre _mlIFS;
|
23 |
+
if [ -n "${IFS+x}" ]; then
|
24 |
+
_mlIFS=$IFS;
|
25 |
+
fi;
|
26 |
+
IFS='\'' '\'';
|
27 |
+
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
|
28 |
+
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
|
29 |
+
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
|
30 |
+
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
|
31 |
+
fi;
|
32 |
+
_mlrv="MODULES_RUNENV_${_mlv}";
|
33 |
+
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
|
34 |
+
fi;
|
35 |
+
done;
|
36 |
+
if [ -n "${_mlre:-}" ]; then
|
37 |
+
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
|
38 |
+
else
|
39 |
+
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
|
40 |
+
fi;
|
41 |
+
_mlstatus=$?;
|
42 |
+
if [ -n "${_mlIFS+x}" ]; then
|
43 |
+
IFS=$_mlIFS;
|
44 |
+
else
|
45 |
+
unset IFS;
|
46 |
+
fi;
|
47 |
+
unset _mlre _mlv _mlrv _mlIFS;
|
48 |
+
if [ -n "${_mlshdbg:-}" ]; then
|
49 |
+
set -$_mlshdbg;
|
50 |
+
fi;
|
51 |
+
unset _mlshdbg;
|
52 |
+
return $_mlstatus;
|
53 |
+
};
|
54 |
+
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
|
55 |
+
ENV=/usr/share/modules/init/profile.sh; export ENV;
|
56 |
+
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
|
57 |
+
MODULESHOME=/usr/share/modules; export MODULESHOME;
|
58 |
+
test 0;'
|
59 |
+
+++ _mlret=0
|
60 |
+
+++ '[' -n x ']'
|
61 |
+
+++ IFS='
|
62 |
+
'
|
63 |
+
+++ unset _mlIFS
|
64 |
+
+++ unset _mlre _mlv _mlrv
|
65 |
+
+++ '[' 0 -eq 0 ']'
|
66 |
+
+++ eval 'module() {
unset _mlshdbg;
if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '\''1'\'' ]; then
case "$-" in
*v*x*) set +vx; _mlshdbg='\''vx'\'' ;;
*v*) set +v; _mlshdbg='\''v'\'' ;;
*x*) set +x; _mlshdbg='\''x'\'' ;;
*) _mlshdbg='\'''\'' ;;
esac;
fi;
unset _mlre _mlIFS;
if [ -n "${IFS+x}" ]; then
_mlIFS=$IFS;
fi;
IFS='\'' '\'';
for _mlv in ${MODULES_RUN_QUARANTINE:-}; do
if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
if [ -n "`eval '\''echo ${'\''$_mlv'\''+x}'\''`" ]; then
_mlre="${_mlre:-}${_mlv}_modquar='\''`eval '\''echo ${'\''$_mlv'\''}'\''`'\'' ";
fi;
_mlrv="MODULES_RUNENV_${_mlv}";
_mlre="${_mlre:-}${_mlv}='\''`eval '\''echo ${'\''$_mlrv'\'':-}'\''`'\'' ";
fi;
done;
if [ -n "${_mlre:-}" ]; then
eval `eval ${_mlre}/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash '\''"$@"'\''`;
else
eval `/usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash "$@"`;
fi;
_mlstatus=$?;
if [ -n "${_mlIFS+x}" ]; then
IFS=$_mlIFS;
else
unset IFS;
fi;
unset _mlre _mlv _mlrv _mlIFS;
if [ -n "${_mlshdbg:-}" ]; then
set -$_mlshdbg;
fi;
unset _mlshdbg;
return $_mlstatus;
};
MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl; export MODULES_CMD;
ENV=/usr/share/modules/init/profile.sh; export ENV;
BASH_ENV=/usr/share/modules/init/bash; export BASH_ENV;
MODULESHOME=/usr/share/modules; export MODULESHOME;
test 0;'
++++ MODULES_CMD=/usr/lib/x86_64-linux-gnu/modulecmd.tcl
++++ export MODULES_CMD
++++ ENV=/usr/share/modules/init/profile.sh
++++ export ENV
++++ BASH_ENV=/usr/share/modules/init/bash
++++ export BASH_ENV
++++ MODULESHOME=/usr/share/modules
++++ export MODULESHOME
++++ test 0
+++ '[' 0 = 1 ']'
+++ '[' -t 2 ']'
+++ export -f module
+++ export -f switchml
+++ '[' 5 -ge 3 ']'
+++ [[ ehxB =~ i ]]
+++ [[ ! :/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin: =~ :/usr/bin: ]]
+++ '[' '!' -n x ']'
++++ manpath
+++ [[ ! :/admin/home/nouamane/.local/share/man:/fsx/nouamane/miniconda/envs/2-1-cu121/man:/fsx/nouamane/miniconda/envs/2-1-cu121/share/man:/fsx/nouamane/miniconda/man:/fsx/nouamane/miniconda/share/man:/opt/slurm/share/man:/opt/amazon/openmpi/share/man:/opt/amazon/efa/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/admin/home/nouamane/.fzf/man:: =~ :/usr/share/man: ]]
+++ unset _mlcode _mlret
+++ '[' -n '' ']'
+ module load cuda/12.1
+ unset _mlshdbg
+ '[' 0 = 1 ']'
+ unset _mlre _mlIFS
+ '[' -n x ']'
+ _mlIFS='
'
+ IFS=' '
+ '[' -n '' ']'
++ /usr/bin/tclsh8.6 /usr/lib/x86_64-linux-gnu/modulecmd.tcl bash load cuda/12.1
+ eval 'CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include;' export 'CPATH;
LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/opt/aws-ofi-nccl/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/targets/x86_64-linux/lib/:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/lib:/usr/lib;' export 'LD_LIBRARY_PATH;
FI_EFA_FORK_SAFE=1;' export 'FI_EFA_FORK_SAFE;
MANPATH=/usr/local/cuda-12.1/share/man::;' export 'MANPATH;
LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64;' export 'LIBRARY_PATH;
_LMFILES_=/usr/share/modules/modulefiles/cuda/12.1;' export '_LMFILES_;
LOADEDMODULES=cuda/12.1;' export 'LOADEDMODULES;
MPI_PATH=/opt/amazon/openmpi;' export 'MPI_PATH;
NCCL_HOME_modshare=/opt/nccl/build:1;' export 'NCCL_HOME_modshare;
NCCL_PROTO=simple;' export 'NCCL_PROTO;
MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1;' export 'MANPATH_modshare;
LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1;' export 'LIBRARY_PATH_modshare;
NCCL_SOCKET_IFNAME=enp;' export 'NCCL_SOCKET_IFNAME;
AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl;' export 'AWS_OFI_NCCL_HOME;
NCCL_HOME=/opt/nccl/build;' export 'NCCL_HOME;
FI_PROVIDER=efa;' export 'FI_PROVIDER;
AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1;' export 'AWS_OFI_NCCL_HOME_modshare;
CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1;' export 'CPATH_modshare;
LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib/:1:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2:/opt/aws-ofi-nccl/lib:2:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/efa/lib:1:/usr/lib:1;' export 'LD_LIBRARY_PATH_modshare;
FI_EFA_ENABLE_SHM_TRANSFER=1;' export 'FI_EFA_ENABLE_SHM_TRANSFER;
_LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1;' export '_LMFILES__modshare;
LOADEDMODULES_modshare=cuda/12.1:1;' export 'LOADEDMODULES_modshare;
MPI_PATH_modshare=/opt/amazon/openmpi:1;' export 'MPI_PATH_modshare;
PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin;' export 'PATH;
CUDA_HOME=/usr/local/cuda-12.1;' export 'CUDA_HOME;
PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1;' export 'PATH_modshare;
test' '0;'
++ CPATH=/opt/nccl/build/include:/usr/local/cuda-12.1/include
++ export CPATH
++ LD_LIBRARY_PATH=/opt/nccl/build/lib:/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/usr/local/cuda-12.1/efa/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/cuda-12.1/targets/x86_64-linux/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/opt/aws-ofi-nccl/lib:/usr/local/cuda-12.1/lib:/usr/local/cuda-12.1/lib64:/usr/local/cuda-12.1:/usr/local/cuda-12.1/targets/x86_64-linux/lib/:/usr/local/cuda-12.1/extras/CUPTI/lib64:/usr/local/lib:/usr/lib
++ export LD_LIBRARY_PATH
++ FI_EFA_FORK_SAFE=1
++ export FI_EFA_FORK_SAFE
++ MANPATH=/usr/local/cuda-12.1/share/man::
++ export MANPATH
++ LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/nccl/build/lib:/usr/local/cuda-12.1/lib64
++ export LIBRARY_PATH
++ _LMFILES_=/usr/share/modules/modulefiles/cuda/12.1
++ export _LMFILES_
++ LOADEDMODULES=cuda/12.1
++ export LOADEDMODULES
++ MPI_PATH=/opt/amazon/openmpi
++ export MPI_PATH
++ NCCL_HOME_modshare=/opt/nccl/build:1
++ export NCCL_HOME_modshare
++ NCCL_PROTO=simple
++ export NCCL_PROTO
++ MANPATH_modshare=:1:/usr/local/cuda-12.1/share/man:1
++ export MANPATH_modshare
++ LIBRARY_PATH_modshare=/opt/aws-ofi-nccl/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:1
++ export LIBRARY_PATH_modshare
++ NCCL_SOCKET_IFNAME=enp
++ export NCCL_SOCKET_IFNAME
++ AWS_OFI_NCCL_HOME=/opt/aws-ofi-nccl
++ export AWS_OFI_NCCL_HOME
++ NCCL_HOME=/opt/nccl/build
++ export NCCL_HOME
++ FI_PROVIDER=efa
++ export FI_PROVIDER
++ AWS_OFI_NCCL_HOME_modshare=/opt/aws-ofi-nccl:1
++ export AWS_OFI_NCCL_HOME_modshare
++ CPATH_modshare=/usr/local/cuda-12.1/include:1:/opt/nccl/build/include:1
++ export CPATH_modshare
++ LD_LIBRARY_PATH_modshare=/opt/amazon/efa/lib:3:/usr/local/cuda-12.1/extras/CUPTI/lib64:2:/usr/local/lib:1:/opt/nccl/build/lib:1:/usr/local/cuda-12.1/lib64:2:/usr/local/cuda-12.1:2:/usr/local/cuda-12.1/targets/x86_64-linux/lib/:1:/usr/local/cuda-12.1/targets/x86_64-linux/lib:2:/opt/aws-ofi-nccl/lib:2:/opt/amazon/openmpi/lib:2:/usr/local/cuda-12.1/lib:1:/usr/local/cuda-12.1/efa/lib:1:/usr/lib:1
++ export LD_LIBRARY_PATH_modshare
++ FI_EFA_ENABLE_SHM_TRANSFER=1
++ export FI_EFA_ENABLE_SHM_TRANSFER
++ _LMFILES__modshare=/usr/share/modules/modulefiles/cuda/12.1:1
++ export _LMFILES__modshare
++ LOADEDMODULES_modshare=cuda/12.1:1
++ export LOADEDMODULES_modshare
++ MPI_PATH_modshare=/opt/amazon/openmpi:1
++ export MPI_PATH_modshare
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin
++ export PATH
++ CUDA_HOME=/usr/local/cuda-12.1
++ export CUDA_HOME
++ PATH_modshare=/usr/local/cuda-12.1/efa/test-cuda-12.1:1:/usr/bin:1:/usr/local/cuda-12.1/include:1:/opt/amazon/efa/bin:1:/admin/home/nouamane/.local/bin:1:/usr/local/bin:1:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:1:/bin:1:/snap/bin:1:/fsx/nouamane/miniconda/condabin:1:/sbin:1:/usr/sbin:1:/fsx/nouamane/miniconda/bin:1:/opt/slurm/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/local/cuda-12.1/bin:2:/opt/amazon/openmpi/bin:1:/admin/home/nouamane/.fzf/bin:1:/usr/local/games:1
++ export PATH_modshare
++ test 0
+ _mlstatus=0
+ '[' -n x ']'
+ IFS='
'
+ unset _mlre _mlv _mlrv _mlIFS
+ '[' -n '' ']'
+ unset _mlshdbg
+ return 0
+ source /fsx/nouamane/miniconda/bin/activate
++ _CONDA_ROOT=/fsx/nouamane/miniconda
++ . /fsx/nouamane/miniconda/etc/profile.d/conda.sh
+++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
+++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
+++ export _CE_M=
+++ _CE_M=
+++ export _CE_CONDA=
+++ _CE_CONDA=
+++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
+++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
+++ '[' -z x ']'
++ conda activate
++ local cmd=activate
++ case "$cmd" in
++ __conda_activate activate
++ '[' -n '' ']'
++ local ask_conda
+++ PS1=
+++ __conda_exe shell.posix activate
+++ /fsx/nouamane/miniconda/bin/conda shell.posix activate
++ ask_conda='PS1='\''(base) '\''
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin'\''
export CONDA_SHLVL='\''1'\''
export CONDA_PROMPT_MODIFIER='\''(base) '\'''
++ eval 'PS1='\''(base) '\''
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin'\''
export CONDA_SHLVL='\''1'\''
export CONDA_PROMPT_MODIFIER='\''(base) '\'''
+++ PS1='(base) '
+++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin
+++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin
+++ export CONDA_SHLVL=1
+++ CONDA_SHLVL=1
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
+++ CONDA_PROMPT_MODIFIER='(base) '
++ __conda_hashr
++ '[' -n '' ']'
++ '[' -n '' ']'
++ hash -r
+ conda activate 2-1-cu121
+ local cmd=activate
+ case "$cmd" in
+ __conda_activate activate 2-1-cu121
+ '[' -n '' ']'
+ local ask_conda
++ PS1='(base) '
++ __conda_exe shell.posix activate 2-1-cu121
++ /fsx/nouamane/miniconda/bin/conda shell.posix activate 2-1-cu121
+ ask_conda='PS1='\''(2-1-cu121) '\''
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin'\''
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
export CONDA_SHLVL='\''2'\''
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
export CONDA_PREFIX_1='\''/fsx/nouamane/miniconda'\''
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
export _CE_M='\'''\''
export _CE_CONDA='\'''\''
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
+ eval 'PS1='\''(2-1-cu121) '\''
export PATH='\''/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin'\''
export CONDA_PREFIX='\''/fsx/nouamane/miniconda/envs/2-1-cu121'\''
export CONDA_SHLVL='\''2'\''
export CONDA_DEFAULT_ENV='\''2-1-cu121'\''
export CONDA_PROMPT_MODIFIER='\''(2-1-cu121) '\''
export CONDA_PREFIX_1='\''/fsx/nouamane/miniconda'\''
export CONDA_EXE='\''/fsx/nouamane/miniconda/bin/conda'\''
export _CE_M='\'''\''
export _CE_CONDA='\'''\''
export CONDA_PYTHON_EXE='\''/fsx/nouamane/miniconda/bin/python'\''
. "/fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh"'
++ PS1='(2-1-cu121) '
++ export PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin
++ PATH=/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin
++ export CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
++ CONDA_PREFIX=/fsx/nouamane/miniconda/envs/2-1-cu121
++ export CONDA_SHLVL=2
++ CONDA_SHLVL=2
++ export CONDA_DEFAULT_ENV=2-1-cu121
++ CONDA_DEFAULT_ENV=2-1-cu121
++ export 'CONDA_PROMPT_MODIFIER=(2-1-cu121) '
++ CONDA_PROMPT_MODIFIER='(2-1-cu121) '
++ export CONDA_PREFIX_1=/fsx/nouamane/miniconda
++ CONDA_PREFIX_1=/fsx/nouamane/miniconda
++ export CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
++ CONDA_EXE=/fsx/nouamane/miniconda/bin/conda
++ export _CE_M=
++ _CE_M=
++ export _CE_CONDA=
++ _CE_CONDA=
++ export CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
++ CONDA_PYTHON_EXE=/fsx/nouamane/miniconda/bin/python
++ . /fsx/nouamane/miniconda/envs/2-1-cu121/etc/conda/activate.d/libxml2_activate.sh
+++ test -n ''
+++ xml_catalog_files_libxml2=
+++ XML_CATALOG_FILES=
+++ conda_catalog_files=
+++ ifs_libxml2='
'
+++ IFS=' '
+++ rem=/fsx/nouamane/miniconda/envs/2-1-cu121
+++ for pre in ${rem}
+++ test '' = /fsx/nouamane/miniconda/envs/2-1-cu121
+++ conda_catalog_files=/fsx/nouamane/miniconda/envs/2-1-cu121
+++ rem=
+++ IFS='
'
+++ conda_catalog_files='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
+++ export 'XML_CATALOG_FILES=file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
+++ XML_CATALOG_FILES='file:///fsx/nouamane/miniconda/envs/2-1-cu121/etc/xml/catalog file:///etc/xml/catalog'
+++ unset conda_catalog_files ifs_libxml2 rem
+ __conda_hashr
+ '[' -n '' ']'
+ '[' -n '' ']'
+ hash -r
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin
+ PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/usr/local/cuda-12.1/efa/test-cuda-12.1:/admin/home/nouamane/.local/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/envs/2-1-cu121/bin:/fsx/nouamane/miniconda/condabin:/opt/slurm/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/include:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/slurm/bin:/admin/home/nouamane/.fzf/bin
++ scontrol show hostnames 'ip-26-0-160-[103,242],ip-26-0-165-213,ip-26-0-168-238,ip-26-0-169-[132,139],ip-26-0-171-[21,56]'
+ export 'NODELIST=ip-26-0-160-103
ip-26-0-160-242
ip-26-0-165-213
ip-26-0-168-238
ip-26-0-169-132
ip-26-0-169-139
ip-26-0-171-21
ip-26-0-171-56'
+ NODELIST='ip-26-0-160-103
ip-26-0-160-242
ip-26-0-165-213
ip-26-0-168-238
ip-26-0-169-132
ip-26-0-169-139
ip-26-0-171-21
ip-26-0-171-56'
++ head -n1
++ scontrol show hostnames 'ip-26-0-160-[103,242],ip-26-0-165-213,ip-26-0-168-238,ip-26-0-169-[132,139],ip-26-0-171-[21,56]'
+ export MASTER_NODE=ip-26-0-160-103
+ MASTER_NODE=ip-26-0-160-103
+ export MASTER_PORT=12356
+ MASTER_PORT=12356
+ export NNODES=8
+ NNODES=8
+ export GPUS_PER_NODE=8
+ GPUS_PER_NODE=8
+ export WORLD_SIZE=64
+ WORLD_SIZE=64
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ CUDA_DEVICE_MAX_CONNECTIONS=1
+ export NCCL_DEBUG=WARN
+ NCCL_DEBUG=WARN
+ export NANOTRON_BENCHMARK=1
+ NANOTRON_BENCHMARK=1
+ export WANDB_MODE=disabled
+ WANDB_MODE=disabled
+ export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+ TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+ echo '=== GPU Topology ==='
=== GPU Topology ===
+ nvidia-smi topo -m
GPU0 GPU1 GPU2 GPU3 GPU4 GPU5 GPU6 GPU7 CPU Affinity NUMA Affinity GPU NUMA ID
GPU0 X NV18 NV18 NV18 NV18 NV18 NV18 NV18 0-47 0 N/A
GPU1 NV18 X NV18 NV18 NV18 NV18 NV18 NV18 0-47 0 N/A
GPU2 NV18 NV18 X NV18 NV18 NV18 NV18 NV18 0-47 0 N/A
GPU3 NV18 NV18 NV18 X NV18 NV18 NV18 NV18 0-47 0 N/A
GPU4 NV18 NV18 NV18 NV18 X NV18 NV18 NV18 48-95 1 N/A
GPU5 NV18 NV18 NV18 NV18 NV18 X NV18 NV18 48-95 1 N/A
GPU6 NV18 NV18 NV18 NV18 NV18 NV18 X NV18 48-95 1 N/A
GPU7 NV18 NV18 NV18 NV18 NV18 NV18 NV18 X 48-95 1 N/A

Legend:

X = Self
SYS = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)
NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node
PHB = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)
PXB = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)
PIX = Connection traversing at most a single PCIe bridge
NV# = Connection traversing a bonded set of # NVLinks
+ echo ==================
==================
+ echo 'Master node: ip-26-0-160-103'
Master node: ip-26-0-160-103
+ echo 'All nodes: ip-26-0-160-103
ip-26-0-160-242
ip-26-0-165-213
ip-26-0-168-238
ip-26-0-169-132
ip-26-0-169-139
ip-26-0-171-21
ip-26-0-171-56'
All nodes: ip-26-0-160-103
ip-26-0-160-242
ip-26-0-165-213
ip-26-0-168-238
ip-26-0-169-132
ip-26-0-169-139
ip-26-0-171-21
ip-26-0-171-56
+ echo 'World size: 64'
World size: 64
+ srun --wait=0 --kill-on-bad-exit=1 torchrun --nnodes=8 --nproc_per_node=8 --rdzv_id=13460158 --rdzv_backend=c10d --rdzv_endpoint=ip-26-0-160-103:12356 --max_restarts 0 --rdzv_conf timeout=60 /fsx/nouamane/projects/nanotron/run_train.py --config-file benchmark/configs/config_3.57G_dp4_tp4_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml
[2024-12-15 19:44:58,399] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-15 19:44:58,401] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-15 19:44:58,399] torch.distributed.run: [WARNING]
[2024-12-15 19:44:58,399] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:58,399] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-15 19:44:58,399] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:58,401] torch.distributed.run: [WARNING]
[2024-12-15 19:44:58,401] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:58,401] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-15 19:44:58,401] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:58,399] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-15 19:44:58,400] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-15 19:44:58,399] torch.distributed.run: [WARNING]
[2024-12-15 19:44:58,399] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:58,399] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-15 19:44:58,399] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:58,414] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-15 19:44:58,415] torch.distributed.run: [WARNING]
[2024-12-15 19:44:58,415] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:58,415] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-15 19:44:58,415] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:58,401] torch.distributed.run: [WARNING]
[2024-12-15 19:44:58,401] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:58,401] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-15 19:44:58,401] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:59,038] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-15 19:44:59,038] torch.distributed.run: [WARNING]
[2024-12-15 19:44:59,038] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:59,038] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-15 19:44:59,038] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:59,616] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-15 19:44:59,617] torch.distributed.run: [WARNING]
[2024-12-15 19:44:59,617] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:44:59,617] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-15 19:44:59,617] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:45:00,167] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-15 19:45:00,168] torch.distributed.run: [WARNING]
[2024-12-15 19:45:00,168] torch.distributed.run: [WARNING] *****************************************
[2024-12-15 19:45:00,168] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-15 19:45:00,168] torch.distributed.run: [WARNING] *****************************************
NCCL version 2.18.5+cuda12.2
12/15/2024 19:45:27 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Measuring inter-GPU and intra-node bandwidth...
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Bandwidth measurement complete. Time taken: 17.96 seconds
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Config:
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Config(general=GeneralArgs(project='debug',
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: run='3.57G_dp4_tp4_pp4_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k',
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: seed=42,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: step=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: consumed_train_samples=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: benchmark_csv_path=PosixPath('benchmark/results/bench_final2.csv'),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: ignore_sanity_checks=True),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: parallelism=ParallelismArgs(dp=4,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: pp=4,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: tp=4,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f05cf920e20>,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: tp_linear_async_communication=True,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: recompute_layer=False,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: tp_recompute_allgather=True,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: expert_parallel_size=1),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: eos_token_id=0,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: hidden_act='silu',
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: hidden_size=3072,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: initializer_range=0.02,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: intermediate_size=8192,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: is_llama_config=True,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: max_position_embeddings=4096,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: num_attention_heads=32,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: num_hidden_layers=28,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: num_key_value_heads=32,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: pad_token_id=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: pretraining_tp=1,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: rms_norm_eps=1e-05,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: rope_scaling=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: rope_theta=10000.0,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: rope_interleaved=False,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: tie_word_embeddings=True,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: use_cache=True,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: vocab_size=131072),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: init_method=RandomInit(std=0.02),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: dtype=torch.bfloat16,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: make_vocab_size_divisible_by=1,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: ddp_bucket_cap_mb=25),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: tokenizer_revision=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: tokenizer_max_length=None),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: checkpoint_interval=10000,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: save_initial_state=False,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: save_final_state=False,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: resume_checkpoint_path=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: checkpoints_path_is_shared_file_system=False),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: logging=LoggingArgs(log_level='info',
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: log_level_replica='info',
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: iteration_step_info_interval=1),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: tokens=TokensArgs(sequence_length=4096,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: train_steps=100,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: micro_batch_size=1,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: batch_accumulation_per_replica=64,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: val_check_interval=100,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: limit_val_batches=0,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: limit_test_batches=0),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: adam_beta1=0.9,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: adam_beta2=0.95,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: torch_adam_is_fused=True,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: name='adamW'),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: zero_stage=1,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: weight_decay=0.01,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: clip_grad=1.0,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: accumulate_grad_in_fp32=True,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: lr_warmup_steps=2,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: lr_warmup_style='linear',
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: lr_decay_style='cosine',
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: lr_decay_steps=13,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: lr_decay_starting_step=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: min_decay_lr=1e-05)),
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: start_training_step=1,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: data=DataArgs(dataset=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: seed=42,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: num_loading_workers=1))],
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: profiler=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: lighteval=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: s3_upload=None)
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Model Config:
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: LlamaConfig(bos_token_id=0,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: eos_token_id=0,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: hidden_act='silu',
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: hidden_size=3072,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: initializer_range=0.02,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: intermediate_size=8192,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: is_llama_config=True,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: max_position_embeddings=4096,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: num_attention_heads=32,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: num_hidden_layers=28,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: num_key_value_heads=32,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: pad_token_id=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: pretraining_tp=1,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: rms_norm_eps=1e-05,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: rope_scaling=None,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: rope_theta=10000.0,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: rope_interleaved=False,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: tie_word_embeddings=True,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: use_cache=True,
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: vocab_size=131072)
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Building model..
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Initialize RoPE Theta = 10000.0
12/15/2024 19:45:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Setting PP block ranks...
NCCL version 2.18.5+cuda12.2
12/15/2024 19:45:51 [INFO|DP=0|PP=2|TP=0|ip-26-0-169-132]: Local number of parameters: 227M (432.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Total number of parameters: 3.98G (7585.34MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Local number of parameters: 327M (624.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=2|TP=0|ip-26-0-169-132]: [After model building] Memory usage: 432.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=3|TP=0|ip-26-0-171-21]: Local number of parameters: 214M (408.05MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [After model building] Memory usage: 624.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=3|TP=0|ip-26-0-171-21]: [After model building] Memory usage: 408.57MiB. Peak allocated: 5408.00MiB Peak reserved: 21794.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: No checkpoint path provided.
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Parametrizing model parameters using StandardParametrizator
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-103]: Local number of parameters: 327M (624.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-103]: [After model building] Memory usage: 624.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-103]: Local number of parameters: 327M (624.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-103]: [After model building] Memory usage: 624.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=1|TP=0|ip-26-0-165-213]: Local number of parameters: 227M (432.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=1|TP=0|ip-26-0-165-213]: [After model building] Memory usage: 432.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-103]: Local number of parameters: 327M (624.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-103]: [After model building] Memory usage: 624.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=2|TP=1|ip-26-0-169-132]: Local number of parameters: 227M (432.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=1|TP=1|ip-26-0-165-213]: Local number of parameters: 227M (432.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=3|TP=1|ip-26-0-171-21]: Local number of parameters: 214M (408.05MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=1|TP=1|ip-26-0-165-213]: [After model building] Memory usage: 432.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=2|TP=1|ip-26-0-169-132]: [After model building] Memory usage: 432.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=3|TP=1|ip-26-0-171-21]: [After model building] Memory usage: 408.57MiB. Peak allocated: 5408.00MiB Peak reserved: 21794.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=3|TP=2|ip-26-0-171-21]: Local number of parameters: 214M (408.05MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=2|TP=2|ip-26-0-169-132]: Local number of parameters: 227M (432.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=1|TP=2|ip-26-0-165-213]: Local number of parameters: 227M (432.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=3|TP=2|ip-26-0-171-21]: [After model building] Memory usage: 408.57MiB. Peak allocated: 5408.00MiB Peak reserved: 20770.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=2|TP=2|ip-26-0-169-132]: [After model building] Memory usage: 432.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=1|TP=2|ip-26-0-165-213]: [After model building] Memory usage: 432.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=1|TP=3|ip-26-0-165-213]: Local number of parameters: 227M (432.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=2|TP=3|ip-26-0-169-132]: Local number of parameters: 227M (432.09MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=3|TP=3|ip-26-0-171-21]: Local number of parameters: 214M (408.05MiB)
12/15/2024 19:45:51 [INFO|DP=0|PP=1|TP=3|ip-26-0-165-213]: [After model building] Memory usage: 432.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=2|TP=3|ip-26-0-169-132]: [After model building] Memory usage: 432.61MiB. Peak allocated: 5408.00MiB Peak reserved: 22818.00MiB
12/15/2024 19:45:51 [INFO|DP=0|PP=3|TP=3|ip-26-0-171-21]: [After model building] Memory usage: 408.57MiB. Peak allocated: 5408.00MiB Peak reserved: 21794.00MiB
NCCL version 2.18.5+cuda12.2
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [Optimizer Building] Using LearningRateForSP as learning rate
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [ZeRO sharding] Size of optimizer params per rank:
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [ZeRO sharding] DP Rank 0 has 81.8M out of 327M (25.00%) params' optimizer states
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [ZeRO sharding] DP Rank 1 has 81.8M out of 327M (25.00%) params' optimizer states
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [ZeRO sharding] DP Rank 2 has 81.8M out of 327M (25.00%) params' optimizer states
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [ZeRO sharding] DP Rank 3 has 81.8M out of 327M (25.00%) params' optimizer states
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Using dummy data generator
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [Training Plan] There are 1 training stages
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [Stage Stable Training Stage] start from step 1
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]:
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: [Start training] datetime: 2024-12-15 19:45:55.550908 | mbs: 1 | grad_accum: 64 | global_batch_size: 256 | sequence_length: 4096 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Resuming training from stage Stable Training Stage, it has trained for 0 samples and has 99 remaining train steps
12/15/2024 19:45:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-103]: Memory usage: 2184.84MiB. Peak allocated 5408.00MiB. Peak reserved: 24068.00MiB
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
NCCL version 2.18.5+cuda12.2
|
742 |
+
NCCL version 2.18.5+cuda12.2
|
743 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
744 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
745 |
+
NCCL version 2.18.5+cuda12.2
|
746 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
747 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
748 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
749 |
+
NCCL version 2.18.5+cuda12.2
|
750 |
+
NCCL version 2.18.5+cuda12.2
|
751 |
+
NCCL version 2.18.5+cuda12.2
|
752 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
753 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
754 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
755 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
756 |
+
NCCL version 2.18.5+cuda12.2
|
757 |
+
NCCL version 2.18.5+cuda12.2
|
758 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
759 |
+
NCCL version 2.18.5+cuda12.2
|
760 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
761 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
762 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
763 |
+
NCCL version 2.18.5+cuda12.2
|
764 |
+
NCCL version 2.18.5+cuda12.2
|
765 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
766 |
+
NCCL version 2.18.5+cuda12.2
|
767 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
768 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
769 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
770 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
771 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
772 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
773 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
774 |
+
NCCL version 2.18.5+cuda12.2
|
775 |
+
NCCL version 2.18.5+cuda12.2
|
776 |
+
NCCL version 2.18.5+cuda12.2
|
777 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
778 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
779 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
780 |
+
NCCL version 2.18.5+cuda12.2
|
781 |
+
NCCL version 2.18.5+cuda12.2
|
782 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
783 |
+
NCCL version 2.18.5+cuda12.2
|
784 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
785 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
786 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
787 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
788 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
789 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
790 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
791 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
792 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
793 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
794 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
795 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
796 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
797 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
798 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
799 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
800 |
+
[W ProcessGroupNCCL.cpp:1856] Warning: 0NCCL_AVOID_RECORD_STREAMS=1 has no effect for point-to-point collectives. (function operator())
|
801 |
+
slurmstepd: error: *** STEP 13460158.0 ON ip-26-0-160-103 CANCELLED AT 2024-12-15T19:55:17 DUE TO TIME LIMIT ***
slurmstepd: error: *** JOB 13460158 ON ip-26-0-160-103 CANCELLED AT 2024-12-15T19:55:17 DUE TO TIME LIMIT ***
[2024-12-15 19:55:17] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers   (one line per node agent, 8 in total)
[2024-12-15 19:55:17] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process <pid> closing signal SIGTERM   (one line per worker; PIDs 21212-21219, 38743-38750, 60573-60580, 142734-142741, 160097-160104, 165203-165210, 196703-196710, 410324-410331, several of them signalled twice)
srun: Job step aborted: Waiting up to 32 seconds for job step to finish.
logs/13490790-2024-12-18-10-40-58-bench_8.86G_dp16_tp16_pp1_acc4_mbs4_seq4096_zero0_tpmodeRED_vocab131k.out
ADDED
@@ -0,0 +1,22 @@
srun: Job 13490790 step creation temporarily disabled, retrying (Requested nodes are busy)
srun: Job 13490790 step creation still disabled, retrying (Requested nodes are busy)   (message repeated 20 times)
srun: error: Unable to create step for job 13490790: Job/step already completing or completed
logs/13490809-2024-12-18-11-24-42-bench_8.86G_dp2_tp8_pp1_acc4_mbs32_seq4096_zero0_tpmodeRED_vocab131k.out
ADDED
@@ -0,0 +1,4 @@
srun: Job 13490809 step creation temporarily disabled, retrying (Requested nodes are busy)
srun: Job 13490809 step creation still disabled, retrying (Requested nodes are busy)
srun: Cancelled pending job step with signal 15
srun: error: Unable to create step for job 13490809: Job/step already completing or completed
logs/13490809-2024-12-18-11-33-26-bench_8.86G_dp2_tp8_pp1_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
@@ -0,0 +1,849 @@
srun: Job 13490809 step creation temporarily disabled, retrying (Requested nodes are busy)
srun: Job 13490809 step creation still disabled, retrying (Requested nodes are busy)
srun: Job 13490809 step creation still disabled, retrying (Requested nodes are busy)
srun: Step created for job 13490809
[2024-12-18 11:38:26,440] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-18 11:38:26,443] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-18 11:38:26,443] torch.distributed.run: [WARNING]
[2024-12-18 11:38:26,443] torch.distributed.run: [WARNING] *****************************************
[2024-12-18 11:38:26,443] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-18 11:38:26,443] torch.distributed.run: [WARNING] *****************************************
[2024-12-18 11:38:26,440] torch.distributed.run: [WARNING]
[2024-12-18 11:38:26,440] torch.distributed.run: [WARNING] *****************************************
[2024-12-18 11:38:26,440] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-18 11:38:26,440] torch.distributed.run: [WARNING] *****************************************
NCCL version 2.18.5+cuda12.2
12/18/2024 11:38:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Measuring inter-GPU and intra-node bandwidth...
NCCL version 2.18.5+cuda12.2
12/18/2024 11:39:04 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Bandwidth measurement complete. Time taken: 16.00 seconds
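The two log lines above bracket a ~16 s bandwidth probe that runs before the model is built. Purely as an illustrative sketch (not nanotron's actual implementation; the function name, message size and iteration count are assumptions), a timed all_reduce with plain torch.distributed gives the same kind of number:

import os
import time

import torch
import torch.distributed as dist


def measure_allreduce_bandwidth(size_mb: int = 256, iters: int = 5) -> float:
    """Rough bus-bandwidth estimate in GB/s from timed all_reduce calls (sketch only)."""
    world_size = dist.get_world_size()
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    device = torch.device("cuda", local_rank)
    torch.cuda.set_device(device)
    # size_mb MiB of bf16 elements (2 bytes each).
    buf = torch.empty(size_mb * 1024 * 1024 // 2, dtype=torch.bfloat16, device=device)

    dist.all_reduce(buf)                # warm-up: builds NCCL communicators
    torch.cuda.synchronize(device)

    start = time.perf_counter()
    for _ in range(iters):
        dist.all_reduce(buf)
    torch.cuda.synchronize(device)
    elapsed = (time.perf_counter() - start) / iters

    # A ring all-reduce moves roughly 2*(n-1)/n of the buffer per GPU.
    bytes_moved = buf.numel() * buf.element_size() * 2 * (world_size - 1) / world_size
    return bytes_moved / elapsed / 1e9


if __name__ == "__main__":
    dist.init_process_group(backend="nccl")
    bandwidth = measure_allreduce_bandwidth()
    if dist.get_rank() == 0:
        print(f"approx. all-reduce bus bandwidth: {bandwidth:.1f} GB/s")
    dist.destroy_process_group()

Launched under torchrun (e.g. torchrun --nproc_per_node=8 bandwidth_probe.py) so that LOCAL_RANK and the rendezvous variables are set.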
12/18/2024 11:39:04 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Config:
12/18/2024 11:39:04 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Config(general=GeneralArgs(project='debug',
    run='8.86G_dp2_tp8_pp1_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k',
    seed=42,
    step=None,
    consumed_train_samples=None,
    benchmark_csv_path=PosixPath('benchmark/results/bench_final2.csv'),
    ignore_sanity_checks=True),
    parallelism=ParallelismArgs(dp=2,
    pp=1,
    tp=8,
    pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f29ce35cb80>,
    tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
    tp_linear_async_communication=True,
    recompute_layer=False,
    tp_recompute_allgather=True,
    expert_parallel_size=1),
    model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
    eos_token_id=0,
    hidden_act='silu',
    hidden_size=4096,
    initializer_range=0.02,
    intermediate_size=14336,
    is_llama_config=True,
    max_position_embeddings=4096,
    num_attention_heads=32,
    num_hidden_layers=32,
    num_key_value_heads=32,
    pad_token_id=None,
    pretraining_tp=1,
    rms_norm_eps=1e-05,
    rope_scaling=None,
    rope_theta=10000.0,
    rope_interleaved=False,
    tie_word_embeddings=False,
    use_cache=True,
    vocab_size=131072),
    init_method=RandomInit(std=0.02),
    dtype=torch.bfloat16,
    make_vocab_size_divisible_by=1,
    ddp_bucket_cap_mb=25),
    tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
    tokenizer_revision=None,
    tokenizer_max_length=None),
    checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
    checkpoint_interval=10000,
    save_initial_state=False,
    save_final_state=False,
    resume_checkpoint_path=None,
    checkpoints_path_is_shared_file_system=False),
    logging=LoggingArgs(log_level='info',
    log_level_replica='info',
    iteration_step_info_interval=1),
    tokens=TokensArgs(sequence_length=4096,
    train_steps=100,
    micro_batch_size=32,
    batch_accumulation_per_replica=4,
    val_check_interval=100,
    limit_val_batches=0,
    limit_test_batches=0),
    optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
    adam_beta1=0.9,
    adam_beta2=0.95,
    torch_adam_is_fused=True,
    name='adamW'),
    zero_stage=1,
    weight_decay=0.01,
    clip_grad=1.0,
    accumulate_grad_in_fp32=True,
    learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
    lr_warmup_steps=2,
    lr_warmup_style='linear',
    lr_decay_style='cosine',
    lr_decay_steps=13,
    lr_decay_starting_step=None,
    min_decay_lr=1e-05)),
    data_stages=[DatasetStageArgs(name='Stable Training Stage',
    start_training_step=1,
    data=DataArgs(dataset=None,
    seed=42,
    num_loading_workers=1))],
    profiler=None,
    lighteval=None,
    s3_upload=None)
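The tokens and parallelism sections above fully determine the effective batch; a quick sanity check, assuming the usual convention that the global batch is dp × micro-batch size × gradient-accumulation steps (the variable names below are ad hoc):

# Values copied from the config dump above.
dp = 2                     # data-parallel replicas (ParallelismArgs.dp)
micro_batch_size = 32      # TokensArgs.micro_batch_size
grad_accum = 4             # TokensArgs.batch_accumulation_per_replica
sequence_length = 4096     # TokensArgs.sequence_length

# Assumed convention: every replica accumulates grad_accum micro-batches per optimizer step.
global_batch_size = dp * micro_batch_size * grad_accum
tokens_per_step = global_batch_size * sequence_length

print(global_batch_size)   # 256, matching the [Start training] line further down
print(tokens_per_step)     # 1_048_576 tokens per optimizer step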
12/18/2024 11:39:04 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Model Config:
12/18/2024 11:39:04 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: LlamaConfig(bos_token_id=0,
    eos_token_id=0,
    hidden_act='silu',
    hidden_size=4096,
    initializer_range=0.02,
    intermediate_size=14336,
    is_llama_config=True,
    max_position_embeddings=4096,
    num_attention_heads=32,
    num_hidden_layers=32,
    num_key_value_heads=32,
    pad_token_id=None,
    pretraining_tp=1,
    rms_norm_eps=1e-05,
    rope_scaling=None,
    rope_theta=10000.0,
    rope_interleaved=False,
    tie_word_embeddings=False,
    use_cache=True,
    vocab_size=131072)
12/18/2024 11:39:04 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Building model..
12/18/2024 11:39:04 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Initialize RoPE Theta = 10000.0
12/18/2024 11:39:04 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Setting PP block ranks...
NCCL version 2.18.5+cuda12.2   (printed 14 times)
12/18/2024 11:39:05 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Total number of parameters: 8.86G (16900.06MiB)
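The 8.86G / 16900.06MiB figures above can be reproduced, to within rounding, from the LlamaConfig fields in the dump (hidden_size=4096, intermediate_size=14336, 32 layers, vocab 131072, untied embeddings); a back-of-the-envelope count, ignoring any small extra terms the real module list may add:

# Parameter count for the LlamaConfig shown above (no biases, untied embeddings).
hidden, inter, layers, vocab = 4096, 14336, 32, 131072

embed = vocab * hidden                  # input token embeddings
lm_head = vocab * hidden                # output projection (tie_word_embeddings=False)
attn = 4 * hidden * hidden              # q, k, v, o projections (32 heads == 32 kv heads, plain MHA)
mlp = 3 * hidden * inter                # gate, up, down projections (SwiGLU)
norms = 2 * hidden                      # two RMSNorms per layer
final_norm = hidden

total = embed + lm_head + layers * (attn + mlp + norms) + final_norm
print(f"{total / 1e9:.2f}G parameters")          # 8.86G, as logged
print(f"{total * 2 / 2**20:.0f} MiB in bf16")    # ~16897 MiB, within rounding of the 16900.06MiB above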
12/18/2024 11:39:05 [INFO|DP=0|PP=0|TP=0..7|ip-26-0-160-225]: Local number of parameters: 1.11G (2112.51MiB)   (one line per TP rank, 8 in total)
12/18/2024 11:39:05 [INFO|DP=0|PP=0|TP=0..7|ip-26-0-160-225]: [After model building] Memory usage: 2112.55MiB. Peak allocated: 5504.00MiB Peak reserved: 16770.00-19842.00MiB   (one line per TP rank, 8 in total)
12/18/2024 11:39:05 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: No checkpoint path provided.
12/18/2024 11:39:05 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Parametrizing model parameters using StandardParametrizator
12/18/2024 11:39:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Optimizer Building] Using LearningRateForSP as learning rate
12/18/2024 11:39:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] Size of optimizer params per rank:
12/18/2024 11:39:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] DP Rank 0 has 554M out of 1.11G (50.00%) params' optimizer states
12/18/2024 11:39:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] DP Rank 1 has 554M out of 1.11G (50.00%) params' optimizer states
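The two [ZeRO sharding] lines above reflect ZeRO stage 1 splitting optimizer states evenly across the dp=2 replicas, while every TP rank keeps its full 1.11G parameter shard. A rough accounting, assuming Adam keeps fp32 master weights plus two fp32 moments per parameter (that breakdown is an assumption, not something the log states):

# Per-TP-rank numbers from the log above; dp=2 and zero_stage=1 come from the config.
local_params = 1.11e9                  # parameters owned by one of the 8 TP shards
dp = 2                                 # ZeRO-1 shards optimizer states across DP ranks only

params_per_dp_rank = local_params / dp
print(f"{params_per_dp_rank / 1e6:.0f}M")          # ~555M, matching the ~554M (50.00%) logged

# Assumed fp32 optimizer state per DP rank: master params + exp_avg + exp_avg_sq.
optim_bytes = params_per_dp_rank * 3 * 4
print(f"~{optim_bytes / 2**30:.1f} GiB of fp32 optimizer state per GPU")   # ~6.2 GiB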
12/18/2024 11:39:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
12/18/2024 11:39:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Using dummy data generator
12/18/2024 11:39:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Training Plan] There are 1 training stages
12/18/2024 11:39:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Stage Stable Training Stage] start from step 1
12/18/2024 11:39:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]:
12/18/2024 11:39:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Start training] datetime: 2024-12-18 11:39:07.311489 | mbs: 32 | grad_accum: 4 | global_batch_size: 256 | sequence_length: 4096 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
12/18/2024 11:39:09 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Resuming training from stage Stable Training Stage, it has trained for 0 samples and has 99 remaining train steps
12/18/2024 11:39:09 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 8451.06MiB. Peak allocated 8451.06MiB. Peak reserved: 24134.00MiB
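One assumption-laden way to account for the 8451.06MiB reported above (measured before the first step, so before Adam's moment buffers are allocated) is bf16 parameters plus an fp32 gradient-accumulation buffer plus this rank's fp32 master-parameter shard; the log does not state this breakdown, so treat it as a plausible estimate only:

# All figures in MiB; 2112.55 is the per-rank bf16 parameter shard logged above.
bf16_params = 2112.55
fp32_grad_accum = 2 * bf16_params          # accumulate_grad_in_fp32=True => fp32 grad buffer
fp32_master_shard = 2 * bf16_params / 2    # fp32 master params, ZeRO-1 sharded over dp=2

estimate = bf16_params + fp32_grad_accum + fp32_master_shard
print(f"{estimate:.1f} MiB")               # ~8450 MiB vs. the 8451.06MiB reported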
Traceback (most recent call last):   (the same traceback is emitted concurrently by four worker ranks; their lines are interleaved in the raw log)
  File "/fsx/nouamane/projects/nanotron/run_train.py", line 253, in <module>
    trainer.train(dataloader)
  File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 461, in train
    outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
  File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 498, in training_step
    outputs = self.pipeline_engine.train_batch_iter(
  File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 284, in train_batch_iter
    output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
  File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 50, in forward
    output = model(**micro_batch)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
    sharded_logits = self.model(
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
    return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
  File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
    hidden_encoder_states = encoder_block(**hidden_encoder_states)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
    output = self.pp_block(**new_kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
302 |
+
return self._call_impl(*args, **kwargs)return self._call_impl(*args, **kwargs)
|
303 |
+
|
304 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
305 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
306 |
+
return self._call_impl(*args, **kwargs)
|
307 |
+
return forward_call(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
308 |
+
|
309 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
310 |
+
return forward_call(*args, **kwargs)
|
311 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
312 |
+
return forward_call(*args, **kwargs)
|
313 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
314 |
+
return forward_call(*args, **kwargs)
|
315 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
316 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
317 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
318 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
319 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
320 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
321 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
322 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
323 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
324 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
325 |
+
|
326 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
327 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
328 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
329 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
330 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
331 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
332 |
+
return self._call_impl(*args, **kwargs)
|
333 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
334 |
+
return self._call_impl(*args, **kwargs)
|
335 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
336 |
+
return self._call_impl(*args, **kwargs)
|
337 |
+
return self._call_impl(*args, **kwargs) File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
338 |
+
|
339 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
340 |
+
return forward_call(*args, **kwargs)
|
341 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
342 |
+
return forward_call(*args, **kwargs)
|
343 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
344 |
+
return forward_call(*args, **kwargs)
|
345 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
346 |
+
return forward_call(*args, **kwargs)
|
347 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
348 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
349 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
350 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
351 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
352 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
353 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
354 |
+
return self._call_impl(*args, **kwargs)
|
355 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
356 |
+
return self._call_impl(*args, **kwargs)
|
357 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
358 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
359 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
360 |
+
return self._call_impl(*args, **kwargs)
|
361 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
362 |
+
return forward_call(*args, **kwargs)
|
363 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
364 |
+
return forward_call(*args, **kwargs)
|
365 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
366 |
+
return forward_call(*args, **kwargs)
|
367 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
368 |
+
return self._call_impl(*args, **kwargs)
|
369 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
370 |
+
return forward_call(*args, **kwargs)
|
371 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
372 |
+
return row_linear(return row_linear(
|
373 |
+
|
374 |
+
return row_linear( File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
375 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
376 |
+
|
377 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
378 |
+
return row_linear(
|
379 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
380 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
381 |
+
|
382 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
383 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
384 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
385 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
386 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
387 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
388 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
389 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
390 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]return super().apply(*args, **kwargs) # type: ignore[misc]
|
391 |
+
|
392 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
393 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
394 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
395 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
396 |
+
out = F.linear(tensor, weight, bias)
|
397 |
+
torch.cuda. OutOfMemoryErrorout = F.linear(tensor, weight, bias):
|
398 |
+
CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 0 has a total capacty of 79.33 GiB of which 707.94 MiB is free. Including non-PyTorch memory, this process has 78.63 GiB memory in use. Of the allocated memory 69.47 GiB is allocated by PyTorch, and 957.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
399 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 3 has a total capacty of 79.33 GiB of which 363.94 MiB is free. Including non-PyTorch memory, this process has 78.96 GiB memory in use. Of the allocated memory 69.47 GiB is allocated by PyTorch, and 1.12 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
400 |
+
out = F.linear(tensor, weight, bias)
|
401 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 4 has a total capacty of 79.33 GiB of which 595.94 MiB is free. Including non-PyTorch memory, this process has 78.74 GiB memory in use. Of the allocated memory 69.47 GiB is allocated by PyTorch, and 1021.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
402 |
+
out = F.linear(tensor, weight, bias)
|
403 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 5 has a total capacty of 79.33 GiB of which 547.94 MiB is free. Including non-PyTorch memory, this process has 78.78 GiB memory in use. Of the allocated memory 69.47 GiB is allocated by PyTorch, and 1.06 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
404 |
+
Traceback (most recent call last):
|
405 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 253, in <module>
|
406 |
+
trainer.train(dataloader)
|
407 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 461, in train
|
408 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
409 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 498, in training_step
|
410 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
411 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 284, in train_batch_iter
|
412 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
413 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 50, in forward
|
414 |
+
output = model(**micro_batch)
|
415 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
416 |
+
return self._call_impl(*args, **kwargs)
|
417 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
418 |
+
return forward_call(*args, **kwargs)
|
419 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
420 |
+
sharded_logits = self.model(
|
421 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
422 |
+
return self._call_impl(*args, **kwargs)
|
423 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
424 |
+
return forward_call(*args, **kwargs)
|
425 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
426 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
427 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
428 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
429 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
430 |
+
return self._call_impl(*args, **kwargs)
|
431 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
432 |
+
return forward_call(*args, **kwargs)
|
433 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
434 |
+
output = self.pp_block(**new_kwargs)
|
435 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
436 |
+
return self._call_impl(*args, **kwargs)
|
437 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
438 |
+
return forward_call(*args, **kwargs)
|
439 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
440 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
441 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
442 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
443 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
444 |
+
return self._call_impl(*args, **kwargs)
|
445 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
446 |
+
Traceback (most recent call last):
|
447 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 253, in <module>
|
448 |
+
return forward_call(*args, **kwargs)
|
449 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
450 |
+
trainer.train(dataloader)
|
451 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 461, in train
|
452 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
453 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
454 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
455 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 498, in training_step
|
456 |
+
return self._call_impl(*args, **kwargs)
|
457 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
458 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
459 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 284, in train_batch_iter
|
460 |
+
return forward_call(*args, **kwargs)
|
461 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
462 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
463 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 50, in forward
|
464 |
+
output = model(**micro_batch)
|
465 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
466 |
+
return row_linear(
|
467 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
468 |
+
return self._call_impl(*args, **kwargs)
|
469 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
470 |
+
return forward_call(*args, **kwargs)
|
471 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
472 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
473 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
474 |
+
sharded_logits = self.model(
|
475 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
476 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
477 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
478 |
+
return self._call_impl(*args, **kwargs)
|
479 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
480 |
+
Traceback (most recent call last):
|
481 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 253, in <module>
|
482 |
+
out = F.linear(tensor, weight, bias)
|
483 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 2 has a total capacty of 79.33 GiB of which 931.94 MiB is free. Including non-PyTorch memory, this process has 78.41 GiB memory in use. Of the allocated memory 69.47 GiB is allocated by PyTorch, and 701.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
484 |
+
return forward_call(*args, **kwargs)
|
485 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
486 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
487 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
488 |
+
trainer.train(dataloader)
|
489 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 461, in train
|
490 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
491 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
492 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
493 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 498, in training_step
|
494 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
495 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 284, in train_batch_iter
|
496 |
+
return self._call_impl(*args, **kwargs)
|
497 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
498 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
499 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 50, in forward
|
500 |
+
return forward_call(*args, **kwargs)
|
501 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
502 |
+
output = model(**micro_batch)
|
503 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
504 |
+
output = self.pp_block(**new_kwargs)
|
505 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
506 |
+
return self._call_impl(*args, **kwargs)
|
507 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
508 |
+
return self._call_impl(*args, **kwargs)
|
509 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
510 |
+
return forward_call(*args, **kwargs)
|
511 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
512 |
+
return forward_call(*args, **kwargs)
|
513 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
514 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
515 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
516 |
+
sharded_logits = self.model(
|
517 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
518 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
519 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
520 |
+
return self._call_impl(*args, **kwargs)
|
521 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
522 |
+
return self._call_impl(*args, **kwargs)
|
523 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
524 |
+
Traceback (most recent call last):
|
525 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 253, in <module>
|
526 |
+
trainer.train(dataloader)
|
527 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 461, in train
|
528 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
529 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 498, in training_step
|
530 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
531 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 284, in train_batch_iter
|
532 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
533 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 50, in forward
|
534 |
+
output = model(**micro_batch)
|
535 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
536 |
+
return self._call_impl(*args, **kwargs)
|
537 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
538 |
+
return forward_call(*args, **kwargs)
|
539 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
540 |
+
sharded_logits = self.model(
|
541 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
542 |
+
return self._call_impl(*args, **kwargs)
|
543 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
544 |
+
return forward_call(*args, **kwargs)
|
545 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
546 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
547 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
548 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
549 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
550 |
+
return self._call_impl(*args, **kwargs)
|
551 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
552 |
+
return forward_call(*args, **kwargs)
|
553 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
554 |
+
output = self.pp_block(**new_kwargs)
|
555 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
556 |
+
return self._call_impl(*args, **kwargs)
|
557 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
558 |
+
return forward_call(*args, **kwargs)
|
559 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
560 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
561 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
562 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
563 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
564 |
+
return self._call_impl(*args, **kwargs)
|
565 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
566 |
+
return forward_call(*args, **kwargs)
|
567 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
568 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
569 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
570 |
+
return self._call_impl(*args, **kwargs)
|
571 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
572 |
+
return forward_call(*args, **kwargs)
|
573 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
574 |
+
return row_linear(
|
575 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
576 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
577 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
578 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
579 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
580 |
+
out = F.linear(tensor, weight, bias)
|
581 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 0 has a total capacty of 79.33 GiB of which 963.94 MiB is free. Including non-PyTorch memory, this process has 78.38 GiB memory in use. Of the allocated memory 69.47 GiB is allocated by PyTorch, and 701.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
582 |
+
Traceback (most recent call last):
|
583 |
+
return forward_call(*args, **kwargs)
|
584 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
585 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 253, in <module>
|
586 |
+
return forward_call(*args, **kwargs)
|
587 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
588 |
+
Traceback (most recent call last):
|
589 |
+
File "/fsx/nouamane/projects/nanotron/run_train.py", line 253, in <module>
|
590 |
+
trainer.train(dataloader)
|
591 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 461, in train
|
592 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
593 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
594 |
+
trainer.train(dataloader)
|
595 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
596 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 461, in train
|
597 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
598 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
599 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 498, in training_step
|
600 |
+
return self._call_impl(*args, **kwargs)
|
601 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
602 |
+
outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
603 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/trainer.py", line 498, in training_step
|
604 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
605 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
606 |
+
return forward_call(*args, **kwargs)
|
607 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
608 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
609 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 284, in train_batch_iter
|
610 |
+
return self._call_impl(*args, **kwargs)
|
611 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
612 |
+
outputs = self.pipeline_engine.train_batch_iter(
|
613 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 284, in train_batch_iter
|
614 |
+
return row_linear(
|
615 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
616 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
617 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 50, in forward
|
618 |
+
output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
619 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 50, in forward
|
620 |
+
output = model(**micro_batch)
|
621 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
622 |
+
return forward_call(*args, **kwargs)
|
623 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
624 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
625 |
+
output = model(**micro_batch)
|
626 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
627 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
628 |
+
output = self.pp_block(**new_kwargs)
|
629 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
630 |
+
return self._call_impl(*args, **kwargs)
|
631 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
632 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
633 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
634 |
+
return self._call_impl(*args, **kwargs)
|
635 |
+
return self._call_impl(*args, **kwargs)
|
636 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
637 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
638 |
+
return forward_call(*args, **kwargs)
|
639 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
640 |
+
out = F.linear(tensor, weight, bias)
|
641 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 6 has a total capacty of 79.33 GiB of which 611.94 MiB is free. Including non-PyTorch memory, this process has 78.72 GiB memory in use. Of the allocated memory 69.47 GiB is allocated by PyTorch, and 1021.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
642 |
+
return forward_call(*args, **kwargs)
|
643 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 1013, in forward
|
644 |
+
sharded_logits = self.model(
|
645 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
646 |
+
return forward_call(*args, **kwargs)
|
647 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
648 |
+
sharded_logits = self.model(
|
649 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
650 |
+
return self._call_impl(*args, **kwargs)
|
651 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
652 |
+
return self._call_impl(*args, **kwargs)
|
653 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
654 |
+
return forward_call(*args, **kwargs)
|
655 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
656 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
657 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
658 |
+
return forward_call(*args, **kwargs)
|
659 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 887, in forward
|
660 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
661 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
662 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
663 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
664 |
+
return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
665 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 903, in forward_with_hidden_states
|
666 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
667 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
668 |
+
return self._call_impl(*args, **kwargs)
|
669 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
670 |
+
return self._call_impl(*args, **kwargs)
|
671 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
672 |
+
hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
673 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
674 |
+
return forward_call(*args, **kwargs)
|
675 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
676 |
+
return self._call_impl(*args, **kwargs)
|
677 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
678 |
+
return forward_call(*args, **kwargs)
|
679 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
680 |
+
output = self.pp_block(**new_kwargs)
|
681 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
682 |
+
return forward_call(*args, **kwargs)
|
683 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
684 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
685 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
686 |
+
return self._call_impl(*args, **kwargs)
|
687 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
688 |
+
output = self.pp_block(**new_kwargs)
|
689 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
690 |
+
return self._call_impl(*args, **kwargs)
|
691 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
692 |
+
return forward_call(*args, **kwargs)
|
693 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
694 |
+
return self._call_impl(*args, **kwargs)
|
695 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
696 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)return forward_call(*args, **kwargs)return forward_call(*args, **kwargs)
|
697 |
+
|
698 |
+
|
699 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
700 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 752, in forward
|
701 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
702 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
703 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
704 |
+
return row_linear(
|
705 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
706 |
+
hidden_states, sequence_mask = self._core_forward(hidden_states, sequence_mask)
|
707 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 731, in _core_forward
|
708 |
+
hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"]
|
709 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
710 |
+
return self._call_impl(*args, **kwargs)
|
711 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
712 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
713 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
714 |
+
return self._call_impl(*args, **kwargs)
|
715 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
716 |
+
return forward_call(*args, **kwargs)
|
717 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
718 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
719 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
720 |
+
return forward_call(*args, **kwargs)
|
721 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/models/llama.py", line 245, in forward
|
722 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
723 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
724 |
+
out = F.linear(tensor, weight, bias)
|
725 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 3 has a total capacty of 79.33 GiB of which 875.94 MiB is free. Including non-PyTorch memory, this process has 78.46 GiB memory in use. Of the allocated memory 69.47 GiB is allocated by PyTorch, and 637.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
726 |
+
hidden_states = self.down_proj(self.split_silu_mul(merged_states))
|
727 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
|
728 |
+
return self._call_impl(*args, **kwargs)
|
729 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
730 |
+
return self._call_impl(*args, **kwargs)
|
731 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
|
732 |
+
return forward_call(*args, **kwargs)
|
733 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
734 |
+
return forward_call(*args, **kwargs)
|
735 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 162, in forward
|
736 |
+
return row_linear(
|
737 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
738 |
+
return row_linear(
|
739 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 593, in row_linear
|
740 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
741 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
742 |
+
return _RowLinearAsyncCommunication.apply(input, weight, bias, group, tp_mode)
|
743 |
+
File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
|
744 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
745 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
746 |
+
return super().apply(*args, **kwargs) # type: ignore[misc]
|
747 |
+
File "/fsx/nouamane/projects/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 466, in forward
|
748 |
+
out = F.linear(tensor, weight, bias)
|
749 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 4 has a total capacty of 79.33 GiB of which 915.94 MiB is free. Including non-PyTorch memory, this process has 78.42 GiB memory in use. Of the allocated memory 69.47 GiB is allocated by PyTorch, and 701.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
750 |
+
out = F.linear(tensor, weight, bias)
|
751 |
+
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 5 has a total capacty of 79.33 GiB of which 675.94 MiB is free. Including non-PyTorch memory, this process has 78.66 GiB memory in use. Of the allocated memory 69.47 GiB is allocated by PyTorch, and 957.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
|
752 |
+
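The allocator hint repeated in the OOM messages above refers to PyTorch's PYTORCH_CUDA_ALLOC_CONF setting. A minimal, hypothetical sketch (not part of these benchmark scripts; the 512 MiB value is only illustrative) of setting it before the first CUDA allocation in a worker process:

# Hypothetical snippet, not taken from run_train.py: cap the caching allocator's
# split size to reduce fragmentation, as the OOM message suggests. The variable
# must be set before the first CUDA allocation in the process.
import os

os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:512")  # illustrative value

import torch  # imported after the env var so the CUDA caching allocator picks it up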
[2024-12-18 11:39:22,633] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 149565 closing signal SIGTERM
[2024-12-18 11:39:22,633] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 149566 closing signal SIGTERM
[2024-12-18 11:39:22,633] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 149569 closing signal SIGTERM
[2024-12-18 11:39:22,634] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 149570 closing signal SIGTERM
[2024-12-18 11:39:22,634] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 149571 closing signal SIGTERM
[2024-12-18 11:39:27,631] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 299056 closing signal SIGTERM
[2024-12-18 11:39:27,631] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 299058 closing signal SIGTERM
[2024-12-18 11:39:27,631] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 299059 closing signal SIGTERM
[2024-12-18 11:39:27,631] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 299061 closing signal SIGTERM
[2024-12-18 11:39:27,631] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 299062 closing signal SIGTERM
[2024-12-18 11:39:37,060] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 0 (pid: 149564) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
Traceback (most recent call last):
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 8, in <module>
    sys.exit(main())
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
    return f(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
    run(args)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
    elastic_launch(
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
/fsx/nouamane/projects/nanotron/run_train.py FAILED
------------------------------------------------------------
Failures:
[1]:
  time      : 2024-12-18_11:39:22
  host      : ip-26-0-168-34.ec2.internal
  rank      : 11 (local_rank: 3)
  exitcode  : 1 (pid: 149567)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[2]:
  time      : 2024-12-18_11:39:22
  host      : ip-26-0-168-34.ec2.internal
  rank      : 12 (local_rank: 4)
  exitcode  : 1 (pid: 149568)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2024-12-18_11:39:22
  host      : ip-26-0-168-34.ec2.internal
  rank      : 8 (local_rank: 0)
  exitcode  : 1 (pid: 149564)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
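The error_file: <N/A> entries and the "To enable traceback" pointer above come from torchelastic. A minimal sketch, assuming a run_train.py-style entrypoint (this is not the actual nanotron code), of the documented record decorator that makes each worker write its exception to an error file the launcher can report:

# Hypothetical sketch, not the actual run_train.py: wrapping the entrypoint with
# torchelastic's @record captures worker exceptions into per-rank error files,
# so the FAILED summary shows a real traceback instead of "error_file: <N/A>".
from torch.distributed.elastic.multiprocessing.errors import record


@record
def main() -> None:
    # build the trainer and run training here, e.g. trainer.train(dataloader)
    ...


if __name__ == "__main__":
    main()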
srun: error: ip-26-0-168-34: task 1: Exited with exit code 1
srun: launch/slurm: _step_signal: Terminating StepId=13490809.6
slurmstepd: error: *** STEP 13490809.6 ON ip-26-0-160-225 CANCELLED AT 2024-12-18T11:39:37 ***
[2024-12-18 11:39:37,357] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
[2024-12-18 11:39:37,358] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 299056 closing signal SIGTERM
[2024-12-18 11:39:37,358] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 299062 closing signal SIGTERM
Traceback (most recent call last):
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 8, in <module>
    sys.exit(main())
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
    return f(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
    run(args)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
    elastic_launch(
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 255, in launch_agent
    result = agent.run()
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py", line 124, in wrapper
    result = f(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 736, in run
    result = self._invoke_run(role)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 878, in _invoke_run
    run_result = self._monitor_workers(self._worker_group)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py", line 124, in wrapper
    result = f(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py", line 307, in _monitor_workers
    result = self._pcontext.wait(0)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 288, in wait
    return self._poll()
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 668, in _poll
    self.close()  # terminate all running procs
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 331, in close
    self._close(death_sig=death_sig, timeout=timeout)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 713, in _close
    handler.proc.wait(time_to_wait)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/subprocess.py", line 1209, in wait
    return self._wait(timeout=timeout)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/subprocess.py", line 1953, in _wait
    time.sleep(delay)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 62, in _terminate_process_handler
    raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)
torch.distributed.elastic.multiprocessing.api.SignalException: Process 298983 got signal: 15
srun: error: ip-26-0-160-225: task 0: Exited with exit code 1
logs/13490826-2024-12-18-12-27-58-bench_1.34G_dp16_tp16_pp1_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13490827-2024-12-18-12-26-05-bench_1.34G_dp2_tp8_pp1_acc8_mbs16_seq4096_zero0_tpmodeRED_vocab131k.out
ADDED
@@ -0,0 +1,309 @@
srun: Job 13490827 step creation temporarily disabled, retrying (Requested nodes are busy)
srun: Job 13490827 step creation still disabled, retrying (Requested nodes are busy)
srun: Job 13490827 step creation still disabled, retrying (Requested nodes are busy)
srun: Job 13490827 step creation still disabled, retrying (Requested nodes are busy)
srun: Job 13490827 step creation still disabled, retrying (Requested nodes are busy)
srun: Step created for job 13490827
[2024-12-18 12:33:02,519] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-18 12:33:02,521] torch.distributed.run: [WARNING] master_addr is only used for static rdzv_backend and when rdzv_endpoint is not specified.
[2024-12-18 12:33:02,519] torch.distributed.run: [WARNING]
[2024-12-18 12:33:02,519] torch.distributed.run: [WARNING] *****************************************
[2024-12-18 12:33:02,519] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-18 12:33:02,519] torch.distributed.run: [WARNING] *****************************************
[2024-12-18 12:33:02,521] torch.distributed.run: [WARNING]
[2024-12-18 12:33:02,521] torch.distributed.run: [WARNING] *****************************************
[2024-12-18 12:33:02,521] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-12-18 12:33:02,521] torch.distributed.run: [WARNING] *****************************************
NCCL version 2.18.5+cuda12.2
12/18/2024 12:33:24 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Measuring inter-GPU and intra-node bandwidth...
NCCL version 2.18.5+cuda12.2
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Bandwidth measurement complete. Time taken: 15.48 seconds
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Config:
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Config(general=GeneralArgs(project='debug',
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: run='1.34G_dp2_tp8_pp1_acc8_mbs16_seq4096_zero0_tpmodeRED_vocab131k',
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: seed=42,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: step=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: consumed_train_samples=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: benchmark_csv_path=PosixPath('benchmark/results/bench_final2.csv'),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: ignore_sanity_checks=True),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: parallelism=ParallelismArgs(dp=2,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: pp=1,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: tp=8,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7fd3240a0b80>,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: tp_linear_async_communication=True,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: recompute_layer=False,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: tp_recompute_allgather=True,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: expert_parallel_size=1),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=0,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: eos_token_id=0,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: hidden_act='silu',
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: hidden_size=2048,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: initializer_range=0.02,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: intermediate_size=8192,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: is_llama_config=True,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: max_position_embeddings=4096,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: num_attention_heads=32,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: num_hidden_layers=16,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: num_key_value_heads=32,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: pad_token_id=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: pretraining_tp=1,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: rms_norm_eps=1e-05,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: rope_scaling=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: rope_theta=10000.0,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: rope_interleaved=False,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: tie_word_embeddings=True,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: use_cache=True,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: vocab_size=131072),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: init_method=RandomInit(std=0.02),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: dtype=torch.bfloat16,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: make_vocab_size_divisible_by=1,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: ddp_bucket_cap_mb=25),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: tokenizer=TokenizerArgs(tokenizer_name_or_path='robot-test/dummy-tokenizer-wordlevel',
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: tokenizer_revision=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: tokenizer_max_length=None),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: checkpoints=CheckpointsArgs(checkpoints_path=PosixPath('checkpoints'),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: checkpoint_interval=10000,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: save_initial_state=False,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: save_final_state=False,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: resume_checkpoint_path=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: checkpoints_path_is_shared_file_system=False),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: logging=LoggingArgs(log_level='info',
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: log_level_replica='info',
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: iteration_step_info_interval=1),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: tokens=TokensArgs(sequence_length=4096,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: train_steps=100,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: micro_batch_size=16,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: batch_accumulation_per_replica=8,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: val_check_interval=100,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: limit_val_batches=0,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: limit_test_batches=0),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: adam_beta1=0.9,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: adam_beta2=0.95,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: torch_adam_is_fused=True,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: name='adamW'),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: zero_stage=0,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: weight_decay=0.01,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: clip_grad=1.0,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: accumulate_grad_in_fp32=True,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0003,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: lr_warmup_steps=2,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: lr_warmup_style='linear',
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: lr_decay_style='cosine',
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: lr_decay_steps=13,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: lr_decay_starting_step=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: min_decay_lr=1e-05)),
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: data_stages=[DatasetStageArgs(name='Stable Training Stage',
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: start_training_step=1,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: data=DataArgs(dataset=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: seed=42,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: num_loading_workers=1))],
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: profiler=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: lighteval=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: s3_upload=None)
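The batch-size figures reported later in this log follow directly from the parallelism and token settings above; a minimal sketch of the arithmetic, assuming the usual dp × micro-batch × grad-accum definition of the global batch (not taken from the log itself):

```python
# Sanity-check sketch: derive the effective batch from the logged config values.
dp = 2                              # ParallelismArgs(dp=2, ...)
micro_batch_size = 16               # TokensArgs(micro_batch_size=16, ...)
batch_accumulation_per_replica = 8  # TokensArgs(batch_accumulation_per_replica=8, ...)
sequence_length = 4096              # TokensArgs(sequence_length=4096, ...)

global_batch_size = dp * micro_batch_size * batch_accumulation_per_replica  # 2 * 16 * 8 = 256
tokens_per_step = global_batch_size * sequence_length                       # 256 * 4096 = 1,048,576

print(global_batch_size, tokens_per_step)
# 256 1048576 -> matches "global_batch_size: 256" and the ~1.05M consumed_tokens per iteration below
```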
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Model Config:
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: LlamaConfig(bos_token_id=0,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: eos_token_id=0,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: hidden_act='silu',
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: hidden_size=2048,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: initializer_range=0.02,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: intermediate_size=8192,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: is_llama_config=True,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: max_position_embeddings=4096,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: num_attention_heads=32,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: num_hidden_layers=16,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: num_key_value_heads=32,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: pad_token_id=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: pretraining_tp=1,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: rms_norm_eps=1e-05,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: rope_scaling=None,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: rope_theta=10000.0,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: rope_interleaved=False,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: tie_word_embeddings=True,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: use_cache=True,
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: vocab_size=131072)
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Building model..
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Initialize RoPE Theta = 10000.0
12/18/2024 12:33:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Setting PP block ranks...
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
NCCL version 2.18.5+cuda12.2
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=4|ip-26-0-160-242]: Local number of parameters: 168M (320.13MiB)
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=5|ip-26-0-160-242]: Local number of parameters: 168M (320.13MiB)
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=7|ip-26-0-160-242]: Local number of parameters: 168M (320.13MiB)
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-242]: Local number of parameters: 168M (320.13MiB)
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Total number of parameters: 1.34G (2561.03MiB)
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-242]: Local number of parameters: 168M (320.13MiB)
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Local number of parameters: 168M (320.13MiB)
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=6|ip-26-0-160-242]: Local number of parameters: 168M (320.13MiB)
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-242]: Local number of parameters: 168M (320.13MiB)
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=7|ip-26-0-160-242]: [After model building] Memory usage: 321.15MiB. Peak allocated: 5504.00MiB Peak reserved: 19858.00MiB
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-242]: [After model building] Memory usage: 321.15MiB. Peak allocated: 5504.00MiB Peak reserved: 19858.00MiB
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=4|ip-26-0-160-242]: [After model building] Memory usage: 321.15MiB. Peak allocated: 5504.00MiB Peak reserved: 18834.00MiB
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=5|ip-26-0-160-242]: [After model building] Memory usage: 321.15MiB. Peak allocated: 5504.00MiB Peak reserved: 18834.00MiB
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-242]: [After model building] Memory usage: 321.15MiB. Peak allocated: 5504.00MiB Peak reserved: 20882.00MiB
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: [After model building] Memory usage: 321.15MiB. Peak allocated: 5504.00MiB Peak reserved: 17810.00MiB
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=6|ip-26-0-160-242]: [After model building] Memory usage: 321.15MiB. Peak allocated: 5504.00MiB Peak reserved: 19858.00MiB
12/18/2024 12:33:41 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-242]: [After model building] Memory usage: 321.15MiB. Peak allocated: 5504.00MiB Peak reserved: 16786.00MiB
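The 1.34G total / 168M-per-rank split above can be reproduced from the LlamaConfig in this log; a rough sketch, assuming the standard Llama layer layout with tied embeddings (this arithmetic is not part of the original log):

```python
# Rough parameter count for the logged LlamaConfig (sketch; not nanotron's exact accounting).
hidden, inter, layers, vocab, tp = 2048, 8192, 16, 131072, 8

embed = vocab * hidden          # token embeddings, shared with the LM head (tie_word_embeddings=True)
attn  = 4 * hidden * hidden     # q, k, v, o projections (32 heads == 32 kv heads, no GQA savings)
mlp   = 3 * hidden * inter      # gate, up, down projections (SwiGLU)
norms = 2 * hidden              # two RMSNorm weights per layer

total = embed + layers * (attn + mlp + norms) + hidden  # + final RMSNorm
print(f"{total/1e9:.2f}G total, ~{total/tp/1e6:.0f}M per TP rank, ~{total*2/2**20:.0f}MiB in bf16")
# -> ~1.34G total and ~168M per rank, in line with the lines above
# (the logged MiB figures are marginally higher, presumably from per-rank padding/rounding)
```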
12/18/2024 12:33:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: No checkpoint path provided.
12/18/2024 12:33:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Parametrizing model parameters using StandardParametrizator
12/18/2024 12:33:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: [Optimizer Building] Using LearningRateForSP as learning rate
12/18/2024 12:33:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: [Training Plan] Stage Stable Training Stage has 99 remaining training steps and has consumed 0 samples
12/18/2024 12:33:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Using dummy data generator
12/18/2024 12:33:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: [Training Plan] There are 1 training stages
12/18/2024 12:33:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: [Stage Stable Training Stage] start from step 1
12/18/2024 12:33:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]:
12/18/2024 12:33:42 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: [Start training] datetime: 2024-12-18 12:33:42.739740 | mbs: 16 | grad_accum: 8 | global_batch_size: 256 | sequence_length: 4096 | train_steps: 100 | start_iteration_step: 0 | consumed_train_samples: 0
12/18/2024 12:33:44 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Resuming training from stage Stable Training Stage, it has trained for 0 samples and has 99 remaining train steps
12/18/2024 12:33:44 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Memory usage: 1921.80MiB. Peak allocated 5504.00MiB. Peak reserved: 17810.00MiB
12/18/2024 12:33:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Memory usage: 2506.93MiB. Peak allocated 24095.36MiB. Peak reserved: 41200.00MiB
12/18/2024 12:33:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: iteration: 1 / 100 | consumed_tokens: 1.05M | elapsed_time_per_iteration_ms: 4.63K | tokens_per_sec: 226K | tokens_per_sec_per_gpu: 14.1K | global_batch_size: 256 | lm_loss: 12.2 | lr: 0.00015 | model_tflops_per_gpu: 137 | hardware_tflops_per_gpu: 137 | grad_norm: 0.489 | cuda_memory_allocated: 3.97G | cuda_max_memory_reserved: 43.2G | hd_total_memory_tb: 312G | hd_used_memory_tb: 65.5G | hd_free_memory_tb: 247G
12/18/2024 12:33:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Memory usage: 3787.49MiB. Peak allocated 3787.49MiB. Peak reserved: 41200.00MiB
12/18/2024 12:33:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Memory usage: 3787.50MiB. Peak allocated 25375.92MiB. Peak reserved: 41200.00MiB
12/18/2024 12:33:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: iteration: 2 / 100 | consumed_tokens: 2.1M | elapsed_time_per_iteration_ms: 2.35K | tokens_per_sec: 446K | tokens_per_sec_per_gpu: 27.9K | global_batch_size: 256 | lm_loss: 12.2 | lr: 0.0003 | model_tflops_per_gpu: 269 | hardware_tflops_per_gpu: 269 | grad_norm: 0.489 | cuda_memory_allocated: 3.97G | cuda_max_memory_reserved: 43.2G | hd_total_memory_tb: 312G | hd_used_memory_tb: 65.5G | hd_free_memory_tb: 247G
12/18/2024 12:33:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Memory usage: 3787.49MiB. Peak allocated 3787.54MiB. Peak reserved: 41200.00MiB
12/18/2024 12:33:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Memory usage: 3787.50MiB. Peak allocated 25375.92MiB. Peak reserved: 41200.00MiB
12/18/2024 12:33:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: iteration: 3 / 100 | consumed_tokens: 3.15M | elapsed_time_per_iteration_ms: 2.35K | tokens_per_sec: 446K | tokens_per_sec_per_gpu: 27.9K | global_batch_size: 256 | lm_loss: 12.2 | lr: 0.000296 | model_tflops_per_gpu: 269 | hardware_tflops_per_gpu: 269 | grad_norm: 0.485 | cuda_memory_allocated: 3.97G | cuda_max_memory_reserved: 43.2G | hd_total_memory_tb: 312G | hd_used_memory_tb: 65.5G | hd_free_memory_tb: 247G
12/18/2024 12:33:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: | job_id | name | nodes | seq_len | mbs | batch_accum | gbs | mTFLOPs | hTFLOPs | tok/s/gpu | AllReduce (GB/s) | AllGather (GB/s) | ReduceScatter (GB/s) | AR Intra-node (GB/s) | AG Intra-node (GB/s) | RS Intra-node (GB/s) | Mem Alloc (GB) | Mem Res (GB) | dp | pp | tp | pp_engine | tp_mode | tp_async_comm | hidden_size | hidden_act | num_layers | num_heads | num_kv_heads | max_pos | vocab_size | tie_word_embeddings | dtype | zero_stage | ddp_bucket_cap_mb | accumulate_grad_in_fp32 | Total Params | Local Params |
12/18/2024 12:33:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: | -------- | -------------------------------------------------------------- | ----- | ------- | --- | ----------- | --- | ------- | ------- | --------- | ---------------- | ---------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------- | ------------ | -- | -- | -- | ----------------------------------- | -------------- | ------------- | ----------- | ---------- | ---------- | --------- | ------------ | ------- | ---------- | ------------------- | -------------- | ---------- | ----------------- | ----------------------- | ------------ | ------------ |
12/18/2024 12:33:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: | 13490827 | 1.34G_dp2_tp8_pp1_acc8_mbs16_seq4096_zero0_tpmodeRED_vocab131k | 2 | 4096 | 16 | 8 | 256 | 269.14 | 269.14 | 27850.78 | 414.95 | 226.22 | 200.70 | 453.93 | 263.77 | 261.42 | 3.70 | 40.23 | 2 | 1 | 8 | OneForwardOneBackwardPipelineEngine | REDUCE_SCATTER | True | 2048 | silu | 16 | 32 | 32 | 4096 | 131072 | True | torch.bfloat16 | 0 | 25 | True | 1.34G | 168M |
12/18/2024 12:33:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Attempting to write benchmark results to CSV file: benchmark/results/bench_final2.csv
12/18/2024 12:33:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Acquired lock for CSV file: benchmark/results/bench_final2.csv
12/18/2024 12:33:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Successfully wrote to CSV file: benchmark/results/bench_final2.csv. Releasing lock...
12/18/2024 12:33:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-242]: Throughput logging complete
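The 269 TFLOPs/GPU in the benchmark row is consistent with the logged throughput; a back-of-the-envelope check, using the common 6·N + 12·L·h·s FLOPs-per-token estimate rather than nanotron's exact accounting (values below are copied from this log, the formula is an assumption):

```python
# Back-of-the-envelope TFLOPs check (sketch; the training framework's exact formula may differ).
N = 1.342e9                      # "Total number of parameters: 1.34G"
L, h, s = 16, 2048, 4096         # num_hidden_layers, hidden_size, sequence_length
tok_per_sec_per_gpu = 27850.78   # "tok/s/gpu" column in the table above

flops_per_token = 6 * N + 12 * L * h * s          # fwd+bwd weight matmuls + attention score/value terms
tflops_per_gpu = flops_per_token * tok_per_sec_per_gpu / 1e12
print(f"{tflops_per_gpu:.0f} TFLOPs/GPU")         # ~269, matching the mTFLOPs / hTFLOPs columns
```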
[2024-12-18 12:34:13,827] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 282878 closing signal SIGTERM
[2024-12-18 12:34:13,828] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 282882 closing signal SIGTERM
[2024-12-18 12:34:13,828] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 282883 closing signal SIGTERM
[2024-12-18 12:34:13,839] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 0 (pid: 323854) of binary: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/python
Traceback (most recent call last):
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 8, in <module>
    sys.exit(main())
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
    return f(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
    run(args)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
    elastic_launch(
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
/fsx/nouamane/projects/nanotron/run_train.py FAILED
------------------------------------------------------------
Failures:
[1]:
  time : 2024-12-18_12:34:13
  host : ip-26-0-173-202.ec2.internal
  rank : 9 (local_rank: 1)
  exitcode : 1 (pid: 323855)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[2]:
  time : 2024-12-18_12:34:13
  host : ip-26-0-173-202.ec2.internal
  rank : 10 (local_rank: 2)
  exitcode : 1 (pid: 323856)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[3]:
  time : 2024-12-18_12:34:13
  host : ip-26-0-173-202.ec2.internal
  rank : 11 (local_rank: 3)
  exitcode : 1 (pid: 323857)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[4]:
  time : 2024-12-18_12:34:13
  host : ip-26-0-173-202.ec2.internal
  rank : 12 (local_rank: 4)
  exitcode : 1 (pid: 323858)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[5]:
  time : 2024-12-18_12:34:13
  host : ip-26-0-173-202.ec2.internal
  rank : 13 (local_rank: 5)
  exitcode : 1 (pid: 323859)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[6]:
  time : 2024-12-18_12:34:13
  host : ip-26-0-173-202.ec2.internal
  rank : 14 (local_rank: 6)
  exitcode : 1 (pid: 323860)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[7]:
  time : 2024-12-18_12:34:13
  host : ip-26-0-173-202.ec2.internal
  rank : 15 (local_rank: 7)
  exitcode : 1 (pid: 323861)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time : 2024-12-18_12:34:13
  host : ip-26-0-173-202.ec2.internal
  rank : 8 (local_rank: 0)
  exitcode : 1 (pid: 323854)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
srun: error: ip-26-0-173-202: task 1: Exited with exit code 1
srun: launch/slurm: _step_signal: Terminating StepId=13490827.5
slurmstepd: error: *** STEP 13490827.5 ON ip-26-0-160-242 CANCELLED AT 2024-12-18T12:34:14 ***
[2024-12-18 12:34:14,230] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGTERM death signal, shutting down workers
Traceback (most recent call last):
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/bin/torchrun", line 8, in <module>
    sys.exit(main())
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 346, in wrapper
    return f(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 806, in main
    run(args)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/run.py", line 797, in run
    elastic_launch(
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 134, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 255, in launch_agent
    result = agent.run()
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py", line 124, in wrapper
    result = f(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 736, in run
    result = self._invoke_run(role)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py", line 878, in _invoke_run
    run_result = self._monitor_workers(self._worker_group)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py", line 124, in wrapper
    result = f(*args, **kwargs)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py", line 307, in _monitor_workers
    result = self._pcontext.wait(0)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 288, in wait
    return self._poll()
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 668, in _poll
    self.close()  # terminate all running procs
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 331, in close
    self._close(death_sig=death_sig, timeout=timeout)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 713, in _close
    handler.proc.wait(time_to_wait)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/subprocess.py", line 1209, in wait
    return self._wait(timeout=timeout)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/subprocess.py", line 1953, in _wait
    time.sleep(delay)
  File "/fsx/nouamane/miniconda/envs/2-1-cu121/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 62, in _terminate_process_handler
    raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)
torch.distributed.elastic.multiprocessing.api.SignalException: Process 282805 got signal: 15
srun: error: ip-26-0-160-242: task 0: Exited with exit code 1
logs/13505397-bench_1.34G_dp4_tp4_pp1_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13512539-bench_1.34G_dp32_tp16_pp1_acc4_mbs2_seq4096_zero0_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13522775-bench_3.57G_dp16_tp4_pp1_acc16_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13768581-bench_1.34G_dp2_tp2_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13803107-bench_3.57G_dp2_tp1_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13803282-bench_8.86G_dp2_tp4_pp2_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13803376-bench_8.86G_dp8_tp1_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13803468-bench_1.34G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13803707-bench_3.57G_dp2_tp4_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13803730-bench_3.57G_dp2_tp8_pp4_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13803789-bench_469G_dp2_tp16_pp2_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13848051-bench_1.34G_dp2_tp1_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff

logs/13848114-bench_80G_dp2_tp2_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.out
ADDED
The diff for this file is too large to render.
See raw diff