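# Evaluate language-modeling perplexity on PG19 with a Llama-2-7B-chat backbone
# and a 12-layer super tokenizer, sweeping context lengths at 16x compression.
# Use all 8 visible GPUs.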
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
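# Backbone model; other long-context baselines are kept commented out below.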
model_name_or_path="meta-llama/Llama-2-7b-chat-hf"
# model_name_or_path="Yukang/LongAlpaca-7B-16k"
# model_name_or_path="lmsys/longchat-7b-v1.5-32k"
# model_name_or_path="syzymon/long_llama_code_7b_instruct"
# super_tokenizer_name_or_path="/home/baaiks/ninglu/code/PluginTransformer/data/outputs/90k_0104+8/super_tokenizer"
super_tokenizer_name_or_path="/home/baaiks/ninglu/code/PluginTransformer/data/outputs/90k_0104+12/super_tokenizer"
# super_tokenizer_name_or_path="/home/baaiks/ninglu/code/PluginTransformer/data/outputs/90k_0111+8/super_tokenizer"
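# Previous single-run invocation (8-layer super tokenizer, 8k context), kept for reference: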
# output_dir="data/results/lm/test_1"
# mkdir -p ${output_dir}
# python -m main.eval_lm \
# --model_name_or_path ${model_name_or_path} \
# --super_tokenizer_name_or_path ${super_tokenizer_name_or_path} \
# --super_tokenizer_num_hidden_layers 8 \
# --device_map "auto" \
# --dataset_list "pg19" \
# --output_dir ${output_dir} \
# --max_length 8192 \
# --target_length 512 \
# --compression_enable true \
# --compression_ratio 16 \
# | tee "${output_dir}/eval_lm.log"
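# Sweep evaluation context lengths at a fixed compression ratio.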
max_length_lst="4096 8192 16384 32768"
compression_ratio_lst="16"
for max_length in ${max_length_lst}; do
    for compression_ratio in ${compression_ratio_lst}; do
        # Results directory encodes super tokenizer depth, compression ratio, and context length.
        output_dir="data/results/lm/pg19/llama-12-${compression_ratio}-${max_length}"
        mkdir -p "${output_dir}"

        python -m main.eval_lm \
            --model_name_or_path "${model_name_or_path}" \
            --super_tokenizer_name_or_path "${super_tokenizer_name_or_path}" \
            --super_tokenizer_num_hidden_layers 12 \
            --device_map "auto" \
            --dataset_list "pg19" \
            --output_dir "${output_dir}" \
            --max_length "${max_length}" \
            --target_length 512 \
            --compression_enable true \
            --compression_ratio "${compression_ratio}" \
            --down_scale_method "uniform" \
            | tee "${output_dir}/eval_lm.log"
    done
done