Update lunacode.py

lunacode.py CHANGED (+27 -10)
@@ -5,10 +5,31 @@ from duckduckgo_search import DDGS
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import wikipedia
 
-# ✅ Load Luna model from Hugging Face
 model_path = "cosmosai471/Luna-v2"
 tokenizer = AutoTokenizer.from_pretrained(model_path)
-model = AutoModelForCausalLM.from_pretrained(
+model = AutoModelForCausalLM.from_pretrained(
+    model_path, torch_dtype=torch.bfloat16
+).to("cuda" if torch.cuda.is_available() else "cpu")
+
+def detect_mode(query):
+    code_keywords = ["code", "program", "python", "javascript", "function", "script", "build", "html", "css"]
+    creative_keywords = ["story", "write a story", "poem", "creative", "imagine", "novel", "dialogue"]
+
+    if any(kw in query.lower() for kw in code_keywords):
+        return "code"
+    elif any(kw in query.lower() for kw in creative_keywords):
+        return "creative"
+    else:
+        return "general"
+
+def get_generation_params(query):
+    mode = detect_mode(query)
+    if mode == "code":
+        return 0.3, 0.85
+    elif mode == "creative":
+        return 0.95, 0.95
+    else:
+        return 0.7, 0.9
 
 def luna_generate(prompt, max_tokens=200, temperature=0.7, top_p=0.95):
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
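The added detect_mode/get_generation_params pair gives three-way keyword routing: code queries get low-temperature sampling (0.3/0.85), creative queries run hot (0.95/0.95), and everything else uses balanced defaults (0.7/0.9). A quick sanity check of the routing, using only the two functions added above (the example queries are illustrative, not from the commit):

for query in [
    "write a python function to reverse a string",  # "python"/"function" -> code
    "write a poem about the sea",                   # "poem" -> creative
    "what is the capital of France?",               # no keyword match -> general
]:
    mode = detect_mode(query)
    temperature, top_p = get_generation_params(query)
    print(mode, temperature, top_p)
# code 0.3 0.85 / creative 0.95 0.95 / general 0.7 0.9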
@@ -21,10 +42,6 @@ def luna_generate(prompt, max_tokens=200, temperature=0.7, top_p=0.95):
     )
     return tokenizer.decode(output[0], skip_special_tokens=True)
 
-def is_code_related(question):
-    keywords = ["code", "program", "python", "javascript", "build a function", "algorithm", "write a", "html", "css"]
-    return any(kw in question.lower() for kw in keywords)
-
 def code_prompt_from_question(question):
     return f'''You are a helpful AI programmer. Your task is to generate complete and clean code with explanations.
 
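With detect_mode in place, the old binary is_code_related helper is deleted above. Note also that the hunk shows only the closing lines of luna_generate; a plausible reconstruction of its elided body, assuming the standard transformers generate call that the visible signature and closing lines imply (the exact kwargs in the file may differ), and assuming torch, tokenizer, and model are in scope from earlier in the module:

def luna_generate(prompt, max_tokens=200, temperature=0.7, top_p=0.95):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=max_tokens,   # assumed mapping of max_tokens
            temperature=temperature,
            top_p=top_p,
            do_sample=True,              # sampling implied by temperature/top_p use
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)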
@@ -72,9 +89,10 @@ def scrape_first_good_content(urls):
             continue
     return None, None
 
-def smart_luna_answer(user_question, max_tokens=512
-
-
+def smart_luna_answer(user_question, max_tokens=512):
+    temperature, top_p = get_generation_params(user_question)
+
+    if detect_mode(user_question) == "code":
         prompt = code_prompt_from_question(user_question)
         code_response = luna_generate(prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p)
 
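A small design note on the hunk above: smart_luna_answer runs the keyword classifier twice, once inside get_generation_params and once for the branch test. A single-pass variant would be equivalent (hypothetical sketch, not part of the commit; the prompt strings and parameter table mirror the diff):

def smart_luna_answer_single_pass(user_question, max_tokens=512):
    # classify once, reuse the mode for both params and branching
    mode = detect_mode(user_question)
    params = {"code": (0.3, 0.85), "creative": (0.95, 0.95)}
    temperature, top_p = params.get(mode, (0.7, 0.9))
    if mode == "code":
        prompt = code_prompt_from_question(user_question)
    else:
        prompt = f"User: {user_question}\nLuna:"
    return luna_generate(prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p)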
@@ -96,7 +114,6 @@ Code:
             return f"Luna (code + web-enhanced):\n{code_response.strip()}\n\n(Source: {url_used})"
         return f"Luna (code):\n{code_response.strip()}"
 
-    # Base Luna response
     base_prompt = f"User: {user_question}\nLuna:"
     base_answer = luna_generate(base_prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p)
 
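End to end, the refactor moves sampling control out of the call site: callers pass only the question, and smart_luna_answer derives temperature and top_p from it. A usage sketch (queries and annotations are illustrative):

print(smart_luna_answer("build a python script that scrapes headlines"))
# routed as "code": temperature 0.3, top_p 0.85, code prompt, optional web enhancement

print(smart_luna_answer("tell me a story about a lighthouse keeper"))
# routed as "creative": temperature 0.95, top_p 0.95, plain "User: ... Luna:" prompt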