"""Gradio app exposing the NazareAI Senior Marketing Strategist model as a simple Q&A box."""

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer once at startup (downloads on first run).
model_name = "seedgularity/NazareAI-Senior-Marketing-Strategist"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Exact system prompt — kept verbatim, it is part of the model's expected input.
system_prompt = """
You are NazareAI, a Senior Marketing Strategist AI. You specialize in providing strategic advice for digital marketing, product launches, branding, and community growth. Your responses should be professional, concise, and actionable. Structure your answers logically, using bullet points or numbered lists when appropriate. Be specific and avoid vague generalizations.
"""


def generate_response(user_input):
    """Generate a marketing answer for ``user_input``.

    Combines the fixed system prompt with the user's question, runs the
    causal-LM, and returns only the newly generated text.

    Args:
        user_input: The user's question, as plain text.

    Returns:
        The model's answer as a string (prompt scaffolding stripped).
    """
    # Combine the system prompt with the user's question.
    prompt = f"{system_prompt}\n\nUser: {user_input}\nNazareAI:"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    outputs = model.generate(
        inputs["input_ids"],
        # Pass the attention mask explicitly so generate() does not have to
        # guess which positions are padding.
        attention_mask=inputs["attention_mask"],
        # Bound the *new* tokens; the original max_length=1024 also counted
        # the prompt tokens toward the limit.
        max_new_tokens=512,
        num_return_sequences=1,
        # NOTE(review): assumes the tokenizer defines an EOS token — confirm
        # for this checkpoint; silences the missing-pad-token warning.
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the tokens generated after the prompt — the original decoded
    # the full sequence and echoed the system prompt back to the user.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return response.strip()


# Gradio interface: one text input, one text output.
interface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="NazareAI Marketing Strategist",
    description="Pose une question sur le marketing et laisse NazareAI te fournir une réponse professionnelle et stratégique.",
    theme="default",  # Customize with another theme if desired.
)

# Launch the application only when run as a script.
if __name__ == "__main__":
    interface.launch()