pankajmathur commited on
Commit
3425326
·
verified ·
1 Parent(s): fc8e887

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +5 -5
README.md CHANGED
@@ -10,9 +10,9 @@ base_model:
10
  library_name: transformers
11
  ---
12
 
13
- # Model Name: orca_mini_v9_2_14B
14
 
15
- **orca_mini_v9_2_14B is trained with various SFT Datasets on [microsoft/phi-4](https://huggingface.co/microsoft/phi-4) using Llama's architecture.**
16
 
17
  <img src="https://huggingface.co/pankajmathur/orca_mini_v5_8b/resolve/main/orca_minis_small.jpeg" width="auto" />
18
 
@@ -56,7 +56,7 @@ Below shows a code example on how to use this model in default half precision (b
56
  import torch
57
  from transformers import pipeline
58
 
59
- model_slug = "pankajmathur/orca_mini_v9_2_14B"
60
  pipeline = pipeline(
61
  "text-generation",
62
  model=model_slug,
@@ -76,7 +76,7 @@ Below shows a code example on how to use this model in 4-bit format via bitsandb
76
  import torch
77
  from transformers import BitsAndBytesConfig, pipeline
78
 
79
- model_slug = "pankajmathur/orca_mini_v9_2_14B"
80
  quantization_config = BitsAndBytesConfig(
81
  load_in_4bit=True,
82
  bnb_4bit_quant_type="nf4",
@@ -104,7 +104,7 @@ Below shows a code example on how to use this model in 8-bit format via bitsandb
104
  import torch
105
  from transformers import BitsAndBytesConfig, pipeline
106
 
107
- model_slug = "pankajmathur/orca_mini_v9_2_14B"
108
  quantization_config = BitsAndBytesConfig(
109
  load_in_8bit=True
110
  )
 
10
  library_name: transformers
11
  ---
12
 
13
+ # Model Name: orca_mini_phi-4
14
 
15
+ **orca_mini_phi-4 is trained with various SFT datasets on [microsoft/phi-4](https://huggingface.co/microsoft/phi-4) using Llama's architecture.**
16
 
17
  <img src="https://huggingface.co/pankajmathur/orca_mini_v5_8b/resolve/main/orca_minis_small.jpeg" width="auto" />
18
 
 
56
  import torch
57
  from transformers import pipeline
58
 
59
+ model_slug = "pankajmathur/orca_mini_phi-4"
60
  pipeline = pipeline(
61
  "text-generation",
62
  model=model_slug,
 
76
  import torch
77
  from transformers import BitsAndBytesConfig, pipeline
78
 
79
+ model_slug = "pankajmathur/orca_mini_phi-4"
80
  quantization_config = BitsAndBytesConfig(
81
  load_in_4bit=True,
82
  bnb_4bit_quant_type="nf4",
 
104
  import torch
105
  from transformers import BitsAndBytesConfig, pipeline
106
 
107
+ model_slug = "pankajmathur/orca_mini_phi-4"
108
  quantization_config = BitsAndBytesConfig(
109
  load_in_8bit=True
110
  )