LennardZuendorf committed on
Commit
9c6520d
·
1 Parent(s): 570a1f0

fix: another bugfix

Browse files
Files changed (1) hide show
  1. model/mistral.py +3 -2
model/mistral.py CHANGED
@@ -25,6 +25,7 @@ else:
25
  MODEL = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
26
  MODEL.to(device)
27
  TOKENIZER = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
 
28
 
29
  # default model config
30
  CONFIG = GenerationConfig.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
@@ -93,7 +94,7 @@ def format_answer(answer: str):
93
  formatted_answer = ""
94
 
95
  if type(answer) == list:
96
- answer = fmt.format_output_text
97
 
98
  # extracting text after INST tokens
99
  parts = answer.split("[/INST]")
@@ -116,4 +117,4 @@ def respond(prompt: str):
116
  output_ids = MODEL.generate(input_ids, generation_config=CONFIG)
117
  output_text = TOKENIZER.batch_decode(output_ids)
118
 
119
- return format_answer(output_text)
 
25
  MODEL = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
26
  MODEL.to(device)
27
  TOKENIZER = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
28
+ TOKENIZER.pad_token=TOKENIZER.eos_token
29
 
30
  # default model config
31
  CONFIG = GenerationConfig.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
 
94
  formatted_answer = ""
95
 
96
  if type(answer) == list:
97
+ answer = fmt.format_output_text(answer)
98
 
99
  # extracting text after INST tokens
100
  parts = answer.split("[/INST]")
 
117
  output_ids = MODEL.generate(input_ids, generation_config=CONFIG)
118
  output_text = TOKENIZER.batch_decode(output_ids)
119
 
120
+ return fmt.format_output_text(output_text)