# Uploaded model
## Python

Below is the Python code used to fine-tune the model and run inference on elyza-tasks-100-TV_0.jsonl.
```python
# Load llm-jp/llm-jp-3-13b in 4-bit quantization with a QLoRA configuration.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from unsloth import FastLanguageModel
import torch

max_seq_length = 512  # unsloth supports RoPE scaling, so the context length can be set freely
dtype = None          # None selects the dtype automatically
load_in_4bit = True   # True because we are working with a 13B-class model

model_id = "llm-jp/llm-jp-3-13b"
new_model_id = "llm-jp-3-13b-finetune-2"  # name for the fine-tuned model

# Create the FastLanguageModel instance
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_id,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    trust_remote_code=True,
)

# Prepare the model for SFT by attaching LoRA adapters
model = FastLanguageModel.get_peft_model(
    model,
    r=32,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    use_gradient_checkpointing="unsloth",
    random_state=3407,
    use_rslora=False,
    loftq_config=None,
    max_seq_length=max_seq_length,
)

# Load the training data
from datasets import load_dataset

dataset = load_dataset("json", data_files="/content/drive/My Drive/Student_LLM(公開)/05.最終課題/Distribution20241221_all/ichikara-instruction-003-001-1.json")

# Define the prompt format used during training
prompt = """### 指示
{}
### 回答
{}"""

"""
formatting_prompts_func: reshape each example to match the prompt format
"""
EOS_TOKEN = tokenizer.eos_token  # the tokenizer's EOS (end-of-sequence) token

def formatting_prompts_func(examples):
    input = examples["text"]     # input field
    output = examples["output"]  # output field
    text = prompt.format(input, output) + EOS_TOKEN  # build the training prompt
    return {"formatted_text": text}  # return a new "formatted_text" field

# Apply the format to every example
dataset = dataset.map(
    formatting_prompts_func,
    num_proc=4,  # number of parallel workers
)

from trl import SFTTrainer
from transformers import TrainingArguments
from unsloth import is_bfloat16_supported

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset["train"],
    max_seq_length=max_seq_length,
    dataset_text_field="formatted_text",
    packing=False,
    args=TrainingArguments(
        per_device_train_batch_size=2,
        gradient_accumulation_steps=4,
        num_train_epochs=1,
        logging_steps=10,
        warmup_steps=10,
        save_steps=100,
        save_total_limit=2,
        max_steps=-1,
        learning_rate=2e-4,
        fp16=not is_bfloat16_supported(),
        bf16=is_bfloat16_supported(),
        group_by_length=True,
        seed=3407,
        output_dir="outputs",
        report_to="none",
    ),
)

# Run training
trainer_stats = trainer.train()

# Load ELYZA-tasks-100-TV. Upload the file in advance.
# In the omnicampus environment, drag and drop the task jsonl into the left pane before running.
import json

datasets = []
with open("/content/drive/My Drive/Student_LLM(公開)/05.最終課題/elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""

# Run the tasks with the fine-tuned model
from tqdm import tqdm

# Switch the model to inference mode
FastLanguageModel.for_inference(model)

results = []
for dt in tqdm(datasets):
    input = dt["input"]

    prompt = f"""### 指示\n{input}\n### 回答\n"""

    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=512, use_cache=True, do_sample=False, repetition_penalty=1.2)
    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]

    results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
```
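The listing collects predictions in `results` but never writes them to disk. A minimal sketch of saving them in JSON-Lines form follows; the `output_path` name is illustrative, not part of the original code.

```python
import json

# Hypothetical output path; adjust to wherever the graded file should live.
output_path = f"{new_model_id}_output.jsonl"

with open(output_path, "w", encoding="utf-8") as f:
    for result in results:
        # ensure_ascii=False keeps Japanese text human-readable in the file
        json.dump(result, f, ensure_ascii=False)
        f.write("\n")
```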
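Likewise, `new_model_id` is defined above but never used. A minimal sketch of persisting the trained LoRA adapter locally under that name, using the standard `save_pretrained` methods (pushing to the Hugging Face Hub would need extra setup such as an access token):

```python
# Save the LoRA adapter weights and tokenizer to a local directory named new_model_id.
model.save_pretrained(new_model_id)
tokenizer.save_pretrained(new_model_id)
```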
- **Developed by:** Mountaingorillas
- **License:** apache-2.0
- **Finetuned from model:** llm-jp/llm-jp-3-13b