ehristoforu committed on
Commit
fd91d6d
·
verified ·
1 Parent(s): 24cd908

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -5
app.py CHANGED
@@ -22,18 +22,34 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
22
  HF_TOKEN = os.getenv("HF_TOKEN")
23
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
24
 
25
- model_name = "ehristoforu/rufalcon3-3b-it"
26
 
27
  model = AutoModelForCausalLM.from_pretrained(
28
  model_name,
29
- torch_dtype=torch.float16,
30
  device_map="auto"
31
  )
32
- tokenizer = AutoTokenizer.from_pretrained("ehristoforu/rufalcon3-3b-it")
33
 
 
 
 
 
34
 
 
35
 
36
- @spaces.GPU(duration=60)
 
 
 
 
 
 
 
 
 
 
 
37
  def generate(
38
  message: str,
39
  chat_history: list[tuple[str, str]],
@@ -136,4 +152,4 @@ with gr.Blocks(css="style.css", fill_height=True) as demo:
136
  chat_interface.render()
137
 
138
  if __name__ == "__main__":
139
- demo.queue(max_size=20).launch()
 
22
  HF_TOKEN = os.getenv("HF_TOKEN")
23
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
24
 
25
+ model_name = "Qwen/Qwen2.5-3B-Instruct"
26
 
27
  model = AutoModelForCausalLM.from_pretrained(
28
  model_name,
29
+ torch_dtype=torch.float16,
30
  device_map="auto"
31
  )
32
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
33
 
34
+ peft_model = AutoPeftModelForCausalLM.from_pretrained("ehristoforu/CoolQwen2.5-3b-it")
35
+ merged_model = peft_model.merge_and_unload()
36
+ merged_model.save_pretrained("./coolqwen")
37
+ tokenizer.save_pretrained("./coolqwen")
38
 
39
+ from huggingface_hub import HfApi
40
 
41
+ api = HfApi()
42
+
43
+
44
+
45
+ api.upload_folder(
46
+ folder_path="./coolqwen",
47
+ repo_id="ehristoforu/CoolQwen2.5-3B-IT-fp16",
48
+ repo_type="model",
49
+ token=HF_TOKEN,
50
+ )
51
+
52
+ @spaces.GPU(duration=240)
53
  def generate(
54
  message: str,
55
  chat_history: list[tuple[str, str]],
 
152
  chat_interface.render()
153
 
154
  if __name__ == "__main__":
155
+ demo.queue(max_size=20).launch()