soksof committed on
Commit
133559f
·
verified ·
1 Parent(s): a182e84

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +9 -2
README.md CHANGED
@@ -7,6 +7,8 @@ pipeline_tag: text-generation
7
  library_name: transformers
8
  tags:
9
  - text-generation-inference
 
 
10
  ---
11
 
12
  # Llama-Krikri-8B-Instruct: An Instruction-tuned Large Language Model for the Greek language
@@ -51,8 +53,13 @@ tokenizer = AutoTokenizer.from_pretrained("ilsp/Llama-Krikri-8B-Base")
51
 
52
  model.to(device)
53
 
54
- input_text = tokenizer("Ένα κρικρί διαφέρει από ένα λάμα επειδή", return_tensors='pt').to(device)
55
- outputs = model.generate(input_text['input_ids'], max_new_tokens=256, do_sample=True)
 
 
 
 
 
56
 
57
  print(tokenizer.batch_decode(outputs)[0])
58
  ```
 
7
  library_name: transformers
8
  tags:
9
  - text-generation-inference
10
+ base_model:
11
+ - ilsp/Llama-Krikri-8B-Base
12
  ---
13
 
14
  # Llama-Krikri-8B-Instruct: An Instruction-tuned Large Language Model for the Greek language
 
53
 
54
  model.to(device)
55
 
56
+ messages = [
57
+ {"role": "system", "content": "-------"},
58
+ {"role": "user", "content": "Σε τι διαφέρει ένα κρικρί από ένα λάμα;"},
59
+ ]
60
+ prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
61
+ input_prompt = tokenizer(prompt, return_tensors='pt').to(device)
62
+ outputs = model.generate(input_prompt['input_ids'], max_new_tokens=256, do_sample=True)
63
 
64
  print(tokenizer.batch_decode(outputs)[0])
65
  ```