Commit 30518ee (parent: c7c6b20): Update README.md
README.md CHANGED
````diff
@@ -17,8 +17,7 @@ The Mistral-7B-v0.1 model is a transformer-based model that can handle a variety
 How to utilize my Mistral for Italian text generation
 
 ```python
-import
-from transformers import TextStreamer
+from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
@@ -29,14 +28,18 @@ model.to(device)
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 
 def generate_answer(prompt):
-
-
-
-
-
-
-
-
+    messages = [
+        {"role": "user", "content": prompt},
+    ]
+    model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(device)
+    generated_ids = model.generate(model_inputs, max_new_tokens=200, do_sample=True,
+                                   temperature=0.001, eos_token_id=tokenizer.eos_token_id)
+    decoded = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
+    return decoded[0]
+
+prompt = "Come si apre un file json in python?"
+answer = generate_answer(prompt)
+print(answer)
 ```
 ---
 ## Developer
````
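For reference, here is the post-commit example assembled into a single runnable script. This is a minimal sketch: the model-loading lines sit in the unchanged region between the two hunks, so the `AutoModelForCausalLM.from_pretrained` call is an assumption reconstructed from the hunk context (`model.to(device)` and `tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)`), and the checkpoint name is a hypothetical placeholder not shown in the diff.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hypothetical placeholder: the actual checkpoint name is not visible in the hunks above.
MODEL_NAME = "your-username/mistral-7b-italian"

# Assumed from the hunk context line "model.to(device)"; the loading call itself
# is not shown in the diff.
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model.to(device)

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

def generate_answer(prompt):
    # Wrap the raw prompt in the chat format expected by the tokenizer's chat template.
    messages = [
        {"role": "user", "content": prompt},
    ]
    # apply_chat_template tokenizes the conversation and returns the input ids as a tensor.
    model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(device)
    generated_ids = model.generate(model_inputs, max_new_tokens=200, do_sample=True,
                                   temperature=0.001, eos_token_id=tokenizer.eos_token_id)
    decoded = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return decoded[0]

prompt = "Come si apre un file json in python?"  # Italian: "How do you open a json file in Python?"
answer = generate_answer(prompt)
print(answer)
```

Note that `do_sample=True` with `temperature=0.001` samples from a nearly one-hot distribution, so decoding is effectively greedy; `do_sample=False` would express that intent more directly.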