Update config.py
config.py
CHANGED
@@ -1,24 +1,16 @@
 import os
 from dotenv import load_dotenv
 
-# Load
+# Load from .env file. Store your HF token in the .env file.
 load_dotenv()
 
-
-BASE_MODEL = "
-# Other
-#
-#
+BASE_MODEL = "HuggingFaceH4/zephyr-7b-beta"
+# BASE_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+# Other options:
+# MODEL = "meta-llama/Llama-2-7b-chat-hf"
+# MODEL = "openlm-research/open_llama_3b"
 
-# If you
-# MY_MODEL = "your-username/your-model-name" (replace with your model's Hugging Face ID)
+# If you finetune the model or change it in any way, save it to huggingface hub, then set MY_MODEL to your model ID. The model ID is in the format "your-username/your-model-name".
 MY_MODEL = None
 
-
-HF_TOKEN = os.getenv("HF_TOKEN")
-
-# Check if the token is loaded properly
-if HF_TOKEN is None:
-    print("Error: HF_TOKEN is not set. Please check your .env file.")
-else:
-    print("Hugging Face token loaded successfully.")
+HF_TOKEN = os.getenv("HF_TOKEN")
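For context, a minimal sketch of how another module in this Space might consume the updated settings. The app.py name, the use of transformers.pipeline, and the generation parameters are illustrative assumptions, not part of this commit; the .env file mentioned in the config comment would hold the HF_TOKEN value and is not committed.

# app.py (hypothetical consumer of config.py; a sketch, not part of this commit)
from transformers import pipeline

from config import BASE_MODEL, MY_MODEL, HF_TOKEN

# Prefer the fine-tuned model when MY_MODEL is set, otherwise fall back to the base model.
model_id = MY_MODEL or BASE_MODEL

# The token is only required for gated or private models; it is read from .env via config.py.
generator = pipeline("text-generation", model=model_id, token=HF_TOKEN)

print(generator("Hello!", max_new_tokens=32)[0]["generated_text"])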