Update app.py
app.py CHANGED
@@ -49,7 +49,8 @@ def generar_embedding(texto):
 
 # LLM para generar respuesta final
 llm = pipeline(
-    "text-generation"
+    "text-generation"
+    ,
     model="meta-llama/Llama-3.2-3B-Instruct",
     token=hf_token,
     trust_remote_code=True
@@ -96,7 +97,7 @@ def responder(pregunta):
     resultado = llm(
         prompt,
         max_new_tokens=500,
-        temperature=0.
+        temperature=0.4,
         top_p=0.9,
         repetition_penalty=1.2
     )[0]["generated_text"]
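
The commit adds the comma that was missing after "text-generation" in the pipeline(...) call (previously a syntax error, since model=... followed it directly) and completes the truncated temperature=0. argument as temperature=0.4. As a minimal sketch only, assuming hf_token is loaded elsewhere in app.py (for example from an environment variable) and prompt is assembled inside responder() from the question and retrieved context, the two corrected blocks read roughly like this once the commit is applied:

import os
from transformers import pipeline

# Assumption: app.py obtains the access token somewhere not shown in this diff.
hf_token = os.environ.get("HF_TOKEN")

# LLM para generar respuesta final (LLM used to generate the final answer).
llm = pipeline(
    "text-generation",
    model="meta-llama/Llama-3.2-3B-Instruct",
    token=hf_token,
    trust_remote_code=True,
)

# Inside responder(pregunta): `prompt` is built upstream of this call
# (not shown in the diff), so it is stubbed here.
prompt = "..."
resultado = llm(
    prompt,
    max_new_tokens=500,
    temperature=0.4,        # was the incomplete literal `temperature=0.`
    top_p=0.9,
    repetition_penalty=1.2,
)[0]["generated_text"]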