TASUKU INUI committed
Updated README.md
Added the date and time to the output file name.

README.md CHANGED
@@ -49,6 +49,8 @@ if torch.cuda.get_device_capability()[0] >= 8:
 from unsloth import FastLanguageModel
 import json
 from tqdm import tqdm
+import datetime
+import pytz
 
 model_name = "onewan/llm-jp-3-13b-finetune-2"
 new_model_id = "llm-jp-3-13b-finetune-2"
@@ -87,7 +89,9 @@ for dt in tqdm(datasets):
   prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]
   results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
 
-with open(f"{new_model_id}_output.jsonl", 'w', encoding='utf-8') as f:
+now = datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime("%Y%m%d-%H%M%S")
+
+with open(f"{new_model_id}_output_{now}.jsonl", 'w', encoding='utf-8') as f:
     for result in results:
         json.dump(result, f, ensure_ascii=False)
         f.write('\n')
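
For context, here is a minimal, self-contained sketch of what the change accomplishes: a Japan Standard Time timestamp is generated with pytz and appended to the JSONL output filename, so repeated inference runs no longer overwrite each other's results. The results list below is a placeholder standing in for the README's inference loop output, not data from the repository.

import datetime
import json

import pytz

# Placeholder results; in the README these come from the model inference loop.
results = [
    {"task_id": 0, "input": "example input", "output": "example output"},
]

new_model_id = "llm-jp-3-13b-finetune-2"

# Timestamp in Japan Standard Time, e.g. "20250101-123456".
now = datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime("%Y%m%d-%H%M%S")

# Each run writes a uniquely named file such as
# "llm-jp-3-13b-finetune-2_output_20250101-123456.jsonl".
with open(f"{new_model_id}_output_{now}.jsonl", 'w', encoding='utf-8') as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)
        f.write('\n')

On Python 3.9 and later, the standard-library zoneinfo.ZoneInfo("Asia/Tokyo") could stand in for pytz here; the README uses pytz, which behaves the same for this simple formatting case.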