Update README.md
Browse files
README.md
CHANGED
@@ -27,7 +27,12 @@ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 messages = [
-    {'role': 'system', 'content': "You are a helpful and harmless AI assistant
+    {'role': 'system', 'content': """You are a helpful and harmless AI assistant.
+A conversation between User and Assistant. The user asks a question, and the Assistant solves it.
+The assistant first thinks about the reasoning process in the mind and then provides the user with the answer.
+The reasoning process and answer are enclosed within `<think> </think>` and `<answer> </answer>` tags, respectively, i.e., `<think> reasoning process here </think>` `<answer> answer here </answer>`.
+User: **prompt**.
+Assistant:"""}
 ]
 
 def stream_output(output_text):