import os
from dotenv import load_dotenv

# Load environment variables from the .env file
load_dotenv()

# Define the model you want to use
BASE_MODEL = "openlm-research/open_llama_3b"

# Other model options you can choose from:
# BASE_MODEL = "meta-llama/Llama-2-7b-chat-hf"
# BASE_MODEL = "HuggingFaceH4/zephyr-7b-beta"

# If you've fine-tuned the model, use the following line instead
# (replace the value with your model's Hugging Face ID):
# MY_MODEL = "your-username/your-model-name"
MY_MODEL = None

# Load the Hugging Face token from the environment variable
HF_TOKEN = os.getenv("HF_TOKEN")

# Check that the token was loaded properly
if HF_TOKEN is None:
    print("Error: HF_TOKEN is not set. Please check your .env file.")
else:
    print("Hugging Face token loaded successfully.")
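As a quick check that these values fit together, here is a minimal sketch of how BASE_MODEL, MY_MODEL, and HF_TOKEN might be consumed downstream. It assumes the Hugging Face transformers library is installed; the loading code is illustrative, not part of the original setup.

from transformers import AutoTokenizer, AutoModelForCausalLM

# Prefer the fine-tuned model if one was set, otherwise fall back to the base model.
model_id = MY_MODEL or BASE_MODEL

# Pass the token so gated or private repositories (e.g. Llama 2) can be downloaded.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(model_id, token=HF_TOKEN)

print(f"Loaded {model_id} with {model.num_parameters():,} parameters.")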