teowu committed
Commit d64a45c · verified · 1 Parent(s): 26b237e

Update app.py

Files changed (1):
  1. app.py +1 -4
app.py CHANGED
@@ -2,7 +2,6 @@ import argparse
  import gradio as gr
  import os
  from PIL import Image
- import torch
  import spaces

  from kimi_vl.serve.frontend import reload_javascript
@@ -157,8 +156,7 @@ def predict(
  gradio_chatbot_output = to_gradio_chatbot(conversation)

  full_response = ""
- with torch.no_grad():
-     for x in kimi_vl_generate(
+ for x in kimi_vl_generate(
  conversations=all_conv,
  model=model,
  processor=processor,
@@ -182,7 +180,6 @@ def predict(
  yield gradio_chatbot_output, to_gradio_history(conversation), "Generating..."

  logger.info("flushed result to gradio")
- torch.cuda.empty_cache()

  if is_variable_assigned("x"):
  print(
 
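For readers without the full file: the hunks above drop the torch import, run the generation loop without a torch.no_grad() wrapper, and no longer call torch.cuda.empty_cache() after streaming. Below is a minimal, hypothetical sketch of that post-change control flow; the function names come from the diff, but every body is a stand-in rather than the real Kimi-VL implementation.

# Hypothetical, self-contained sketch of the streaming path after this commit.
# Only the names that appear in the diff (kimi_vl_generate, to_gradio_chatbot,
# to_gradio_history, is_variable_assigned, predict) are taken from app.py;
# every body below is a stand-in so the control flow can run on its own.

def kimi_vl_generate(conversations, model=None, processor=None):
    # Stand-in for the real generator: the real one streams decoded tokens.
    for chunk in ["Hello", ", ", "world", "!"]:
        yield chunk

def to_gradio_chatbot(conversation):
    # Stand-in: render the running assistant text as a single chatbot turn.
    return [["user message", conversation.get("assistant", "")]]

def to_gradio_history(conversation):
    # Stand-in: flatten the conversation dict into a history list.
    return list(conversation.items())

def is_variable_assigned(name):
    # Stand-in for the helper used in app.py to check whether the loop ran.
    return name == "x"

def predict(conversation, all_conv, model=None, processor=None):
    gradio_chatbot_output = to_gradio_chatbot(conversation)

    full_response = ""
    # As in the commit: a plain generator loop, with no torch.no_grad()
    # wrapper and no torch.cuda.empty_cache() call afterwards.
    for x in kimi_vl_generate(conversations=all_conv, model=model, processor=processor):
        full_response += x
        conversation["assistant"] = full_response
        gradio_chatbot_output = to_gradio_chatbot(conversation)
        yield gradio_chatbot_output, to_gradio_history(conversation), "Generating..."

    print("flushed result to gradio")

    if is_variable_assigned("x"):
        print("last streamed chunk:", x)

if __name__ == "__main__":
    for chatbot, history, status in predict({"user": "hi"}, all_conv=[]):
        print(status, chatbot)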