Zethris-Temporal-Loom committed on
Commit
b5bfd74
·
verified ·
1 Parent(s): b6de80d

update LLM for response generation

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -790,7 +790,8 @@ def respond(
790
  # Stream response
791
  response = client.chat.completions.create(
792
  messages=[{"role": "user", "content": prompt}],
793
- model="deepseek-r1-distill-llama-70b",
 
794
  stream=True,
795
  )
796
  cumulative_response = "" # Keep track of the cumulative response
 
790
  # Stream response
791
  response = client.chat.completions.create(
792
  messages=[{"role": "user", "content": prompt}],
793
+ model="llama-3.1-8b-instant",
794
+ # model="llama-3.3-70b-versatile",
795
  stream=True,
796
  )
797
  cumulative_response = "" # Keep track of the cumulative response