import spaces  # Hugging Face Spaces helper (ZeroGPU); imported but unused in this app
import torch
import torch._dynamo
import gradio as gr
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer, BitsAndBytesConfig

# TorchDynamo can fail on quantized/PEFT model graphs; suppress its errors and
# disable compilation so everything runs in eager mode.
torch._dynamo.config.suppress_errors = True
torch._dynamo.disable()
max_seq_length = 2048
dtype = None  # None auto-selects the compute dtype: float16 on older GPUs, bfloat16 on Ampere+
load_in_4bit = True

# Quantize weights to 4-bit (bitsandbytes) so the 9B model fits in modest GPU memory.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=load_in_4bit,
    # bnb_4bit_compute_dtype=torch.float16,
)
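# A fuller 4-bit setup is a common alternative; this is a sketch of standard
# BitsAndBytesConfig options, not what this app ships with:
# quantization_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.bfloat16,
#     bnb_4bit_use_double_quant=True,
# )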
# Tokenizer and LoRA-adapted (PEFT) model fine-tuned for Ukrainian legal Q&A.
tokenizer = AutoTokenizer.from_pretrained("ua-l/gemma-2-9b-legal-uk")
model = AutoPeftModelForCausalLM.from_pretrained(
    "ua-l/gemma-2-9b-legal-uk",
    quantization_config=quantization_config,
    device_map="auto",
)
def predict(question):
    # Wrap the question in the prompt template used during fine-tuning.
    inputs = tokenizer(
        [f'''### Question:
{question}
### Answer:
'''],
        return_tensors="pt",
    ).to("cuda")
    outputs = model.generate(**inputs, max_new_tokens=128)
    # Decode the whole sequence (prompt + completion) back to text.
    results = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    return results[0]
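# A minimal variant (not in the original app) that returns only the generated
# answer rather than prompt + answer, assuming the "### Answer:" marker from the
# prompt template survives decoding:
def predict_answer_only(question):
    prompt = f'''### Question:
{question}
### Answer:
'''
    inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
    outputs = model.generate(**inputs, max_new_tokens=128)
    text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    # Everything after the marker is the model's completion.
    return text.split("### Answer:")[-1].strip()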
# Simple Gradio UI. The default question is Ukrainian for
# "How can IDPs (internally displaced persons) receive payments?"
inputs = gr.Textbox(lines=2, label="Enter a question", value="Як отримати виплати ВПО?")
outputs = gr.Textbox(label="Answer")
demo = gr.Interface(fn=predict, inputs=inputs, outputs=outputs)
demo.launch()
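# Usage sketch (a separate client script, not part of this app): once the Space
# is running, the endpoint can be queried with gradio_client. The Space id below
# is an assumption based on the model repo name; substitute the actual deployment.
#
# from gradio_client import Client
#
# client = Client("ua-l/gemma-2-9b-legal-uk")
# answer = client.predict("Як отримати виплати ВПО?", api_name="/predict")
# print(answer)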