ford442 committed
Commit 66f0f2b · Parent: 2bf500c

Update app.py

Files changed (1): app.py +3 -29
app.py CHANGED
@@ -167,14 +167,11 @@ def generate_60(
     num_images: int = 1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    torch.set_default_device('cuda')
     global models
     pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(device='cuda').manual_seed(seed)
-
+    generator = torch.Generator(device='cpu').manual_seed(seed)
     prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
-
     options = {
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
@@ -185,10 +182,8 @@ def generate_60(
         "generator": generator,
         "output_type": "pil",
     }
-
     if use_resolution_binning:
         options["use_resolution_binning"] = True
-
     images = []
     with torch.no_grad():
         for i in range(0, num_images, BATCH_SIZE):
@@ -205,7 +200,6 @@ def generate_60(
     gc.collect()
     return image_paths, seed
 
-
 @spaces.GPU(duration=90)
 def generate_90(
     model_choice: str,
@@ -226,10 +220,8 @@ def generate_90(
     global models
     pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(device='cuda').manual_seed(seed)
-
+    generator = torch.Generator(device='cpu').manual_seed(seed)
     prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
-
     options = {
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
@@ -240,10 +232,8 @@ def generate_90(
         "generator": generator,
         "output_type": "pil",
     }
-
     if use_resolution_binning:
         options["use_resolution_binning"] = True
-
     images = []
     with torch.no_grad():
         for i in range(0, num_images, BATCH_SIZE):
@@ -274,23 +264,7 @@ def load_predefined_images1():
     ]
     return predefined_images1
 
-
-# def load_predefined_images():
-#     predefined_images = [
-#         "assets2/11.png",
-#         "assets2/22.png",
-#         "assets2/33.png",
-#         "assets2/44.png",
-#         "assets2/55.png",
-#         "assets2/66.png",
-#         "assets2/77.png",
-#         "assets2/88.png",
-#         "assets2/99.png",
-#     ]
-#     return predefined_image
-
-
-with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
+with gr.Blocks(css=css) as demo:
     gr.Markdown(DESCRIPTIONXX)
     with gr.Row():
         prompt = gr.Text(
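The first five hunks remove torch.set_default_device('cuda') and switch the seeded torch.Generator in generate_60 and generate_90 from the 'cuda' device to 'cpu'. A CPU generator can still drive a pipeline that runs on the GPU: diffusers samples the initial latents with whatever generator it is given, so a fixed seed remains reproducible. A minimal sketch of that pattern follows, assuming a diffusers text-to-image pipeline; the pipeline class, model id, and prompt are illustrative placeholders, not taken from app.py.

import torch
from diffusers import StableDiffusionXLPipeline  # assumed pipeline class; app.py may use a different one

# Seed on the CPU; the generator can be handed to a pipeline running on CUDA,
# and the initial latents are drawn with it, keeping results reproducible per seed.
seed = 1234
generator = torch.Generator(device='cpu').manual_seed(seed)

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # placeholder model id
    torch_dtype=torch.float16,
).to('cuda')

image = pipe(
    prompt="a lighthouse at dusk",               # placeholder prompt
    num_inference_steps=30,
    generator=generator,
).images[0]
image.save("out.png")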
 
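The last hunk deletes the commented-out load_predefined_images() helper and drops the theme="bethecloud/storj_theme" argument, so the Blocks app falls back to Gradio's default theme. A small before/after sketch, assuming only that css is a CSS string defined earlier in app.py (the placeholder value below is illustrative):

import gradio as gr

css = ".gradio-container { max-width: 900px; }"  # stand-in for the css string defined in app.py

# Before this commit: load a community theme from the Hugging Face Hub by name.
with gr.Blocks(css=css, theme="bethecloud/storj_theme") as themed_demo:
    gr.Markdown("Runs with the storj theme")

# After this commit: no theme argument, so Gradio applies its default theme.
with gr.Blocks(css=css) as default_demo:
    gr.Markdown("Runs with the default theme")

# default_demo.launch()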