Update README.md
README.md CHANGED

@@ -93,8 +93,8 @@ model_inputs = speech_granite_processor(
 ).to(device)

 # The recommended repetition penalty is 3 as long as input IDs are excluded.
-# Otherwise, you should use a
-
+# Otherwise, you should use a repetition penalty of 1 to keep results stable.
+repetition_penalty_processor = RepetitionPenaltyLogitsProcessor(
     penalty=3.0,
     prompt_ignore_length=model_inputs["input_ids"].shape[-1],
 )
@@ -108,7 +108,7 @@ model_outputs = speech_granite.generate(
     top_p=1.0,
     length_penalty=1.0,
     temperature=1.0,
-    logits_processor=[
+    logits_processor=[repetition_penalty_processor],
     bos_token_id=tokenizer.bos_token_id,
     eos_token_id=tokenizer.eos_token_id,
     pad_token_id=tokenizer.pad_token_id,
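For context, a minimal sketch of how the corrected lines fit together after this change. It assumes the surrounding README setup (speech_granite, speech_granite_processor, tokenizer, and model_inputs moved to device) is already in place, omits any generation arguments not visible in the hunks above, and passing **model_inputs to generate is likewise an assumption based on that setup rather than something shown in this diff.

from transformers import RepetitionPenaltyLogitsProcessor

# The recommended repetition penalty is 3 as long as input IDs are excluded:
# prompt_ignore_length tells the processor to skip the prompt tokens so only
# newly generated tokens are penalized. Otherwise, use a repetition penalty
# of 1 to keep results stable. (prompt_ignore_length is a newer argument, so
# a recent transformers release is assumed.)
repetition_penalty_processor = RepetitionPenaltyLogitsProcessor(
    penalty=3.0,
    prompt_ignore_length=model_inputs["input_ids"].shape[-1],
)

model_outputs = speech_granite.generate(
    **model_inputs,  # assumed: the processed inputs built earlier in the README
    top_p=1.0,
    length_penalty=1.0,
    temperature=1.0,
    logits_processor=[repetition_penalty_processor],
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.pad_token_id,
)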