{
  "added_tokens_decoder": {
    "0": {
      "content": "<sos>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<|system|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "5": {
      "content": "<|user|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "6": {
      "content": "<|assistant|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "7": {
      "content": "<|instruction|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "8": {
      "content": "<|response|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "9": {
      "content": "<|separator|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<sos>",
  "chat_template": "\n{%- for message in messages %}\n    {%- if message['role'] == 'user' %}\n        {{- bos_token + '<|instruction|>' + ' ' + message['content'].strip() }}\n    {%- elif message['role'] == 'assistant' %}\n        {{- '<|response|>' + ' ' + message['content'].strip() + eos_token }}\n    {%- endif %}\n{%- endfor %}\n",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|endoftext|>",
  "extra_special_tokens": {},
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "tokenizer_class": "PreTrainedTokenizerFast",
  "trim_offsets": true,
  "unk_token": "<unk>"
}