chengzeyi commited on
Commit
250ae72
·
verified ·
1 Parent(s): b46d1ba

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +27 -26
app.py CHANGED
@@ -264,7 +264,7 @@ def create_error_image(backend, error_message):
264
 
265
  # Save to a BytesIO object instead of a file
266
  buffer = BytesIO()
267
- img.save(buffer, format="PNG")
268
  img_bytes = buffer.getvalue()
269
 
270
  # Convert to base64 and return as data URI
@@ -295,7 +295,6 @@ async def poll_once(manager, backend, request_id):
295
 
296
  # Handle base64 output
297
  output = data["outputs"][0]
298
- # has_nsfw_content = data["has_nsfw_contents"][0]
299
 
300
  # Check if it's a base64 string or URL
301
  if isinstance(output, str) and output.startswith("http"):
@@ -335,11 +334,12 @@ recent_generations = []
335
 
336
  # Example prompts
337
  example_prompts = [
338
- "A deep sea diver exploring an underwater city ruins, using a palette of deep blues and silvers.",
339
  "A Martian greenhouse complex that uses genetically modified crops designed to thrive in low gravity. Outside, rovers fitted with AI guidance systems patrol dusty red plains, ensuring each pressurized dome remains airtight.",
340
  "A sleek, futuristic sports car with glowing blue accents, racing through a virtual reality landscape, 3D render",
341
  ]
342
 
 
343
  # Use a state variable to store session ID
344
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
345
  session_id = gr.State(None) # Add this to store session ID
@@ -347,19 +347,25 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
347
  gr.Markdown("# 🌊 WaveSpeed AI HiDream Arena")
348
 
349
  # Add the introduction with link to WaveSpeedAI
350
- gr.Markdown("""
 
351
  [WaveSpeedAI](https://wavespeed.ai/) is the global pioneer in accelerating AI-powered video and image generation.
352
  "Our in-house inference accelerator provides lossless speedup on image & video generation based on our rich inference optimization software stack, including our in-house inference compiler, CUDA kernel libraries and parallel computing libraries."
353
- """)
354
- gr.Markdown("""
 
 
355
  This demo showcases the performance and outputs of leading image generation models, including HiDream and Flux, on our accelerated inference platform.
356
- """)
 
357
 
358
  with gr.Row():
359
  with gr.Column(scale=3):
360
- example_dropdown = gr.Dropdown(choices=example_prompts,
361
- label="Choose an example prompt",
362
- interactive=True)
 
 
363
  input_text = gr.Textbox(
364
  example_prompts[0],
365
  label="Enter your prompt",
@@ -369,9 +375,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
369
  with gr.Column(scale=1):
370
  generate_btn = gr.Button("Generate", variant="primary")
371
 
372
- example_dropdown.change(lambda ex: ex,
373
- inputs=[example_dropdown],
374
- outputs=[input_text])
375
 
376
  # Two status boxes - small (default) and big (during generation)
377
  small_status_box = gr.Markdown("Ready to generate images",
@@ -395,10 +399,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
395
  performance_plot = gr.Plot(label="Performance Metrics")
396
 
397
  with gr.Accordion("Recent Generations (last 8)", open=False):
398
- recent_gallery = gr.Gallery(label="Recent Generations",
399
- columns=3,
400
- rows=1,
401
- show_label=True)
402
 
403
  def update_recent_gallery(prompt, results):
404
  recent_generations.append({
@@ -409,15 +410,15 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
409
  })
410
  if len(recent_generations) > 8:
411
  recent_generations.pop(0)
412
-
413
- # Flatten the images from each generation
414
- all_images = []
415
- for gen in reversed(recent_generations): # Show newest first
416
- all_images.extend(
417
- [gen["flux-dev"], gen["hidream-dev"], gen["hidream-full"]])
418
-
419
- # Update the Gradio gallery with the new list
420
- return recent_gallery.update(value=all_images)
421
 
422
  # Add custom CSS for the big status box
423
  css = """
 
264
 
265
  # Save to a BytesIO object instead of a file
266
  buffer = BytesIO()
267
+ img.save(buffer, format="JPEG")
268
  img_bytes = buffer.getvalue()
269
 
270
  # Convert to base64 and return as data URI
 
295
 
296
  # Handle base64 output
297
  output = data["outputs"][0]
 
298
 
299
  # Check if it's a base64 string or URL
300
  if isinstance(output, str) and output.startswith("http"):
 
334
 
335
  # Example prompts
336
  example_prompts = [
337
+ "A deep sea diver exploring an underwater city ruins, using a palette of deep blues and silvers.",
338
  "A Martian greenhouse complex that uses genetically modified crops designed to thrive in low gravity. Outside, rovers fitted with AI guidance systems patrol dusty red plains, ensuring each pressurized dome remains airtight.",
339
  "A sleek, futuristic sports car with glowing blue accents, racing through a virtual reality landscape, 3D render",
340
  ]
341
 
342
+
343
  # Use a state variable to store session ID
344
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
345
  session_id = gr.State(None) # Add this to store session ID
 
347
  gr.Markdown("# 🌊 WaveSpeed AI HiDream Arena")
348
 
349
  # Add the introduction with link to WaveSpeedAI
350
+ gr.Markdown(
351
+ """
352
  [WaveSpeedAI](https://wavespeed.ai/) is the global pioneer in accelerating AI-powered video and image generation.
353
  "Our in-house inference accelerator provides lossless speedup on image & video generation based on our rich inference optimization software stack, including our in-house inference compiler, CUDA kernel libraries and parallel computing libraries."
354
+ """
355
+ )
356
+ gr.Markdown(
357
+ """
358
  This demo showcases the performance and outputs of leading image generation models, including HiDream and Flux, on our accelerated inference platform.
359
+ """
360
+ )
361
 
362
  with gr.Row():
363
  with gr.Column(scale=3):
364
+ example_dropdown = gr.Dropdown(
365
+ choices=example_prompts,
366
+ label="Choose an example prompt",
367
+ interactive=True
368
+ )
369
  input_text = gr.Textbox(
370
  example_prompts[0],
371
  label="Enter your prompt",
 
375
  with gr.Column(scale=1):
376
  generate_btn = gr.Button("Generate", variant="primary")
377
 
378
+ example_dropdown.change(lambda ex: ex, inputs=[example_dropdown], outputs=[input_text])
 
 
379
 
380
  # Two status boxes - small (default) and big (during generation)
381
  small_status_box = gr.Markdown("Ready to generate images",
 
399
  performance_plot = gr.Plot(label="Performance Metrics")
400
 
401
  with gr.Accordion("Recent Generations (last 8)", open=False):
402
+ recent_gallery = gr.Gallery(label="Prompt and Output")
 
 
 
403
 
404
  def update_recent_gallery(prompt, results):
405
  recent_generations.append({
 
410
  })
411
  if len(recent_generations) > 8:
412
  recent_generations.pop(0)
413
+ gallery_items = []
414
+ for r in reversed(recent_generations):
415
+ gallery_items.append(
416
+ (r["flux-dev"], f"FLUX-dev: {r['prompt']}"))
417
+ gallery_items.append(
418
+ (r["hidream-dev"], f"HiDream-dev: {r['prompt']}"))
419
+ gallery_items.append(
420
+ (r["hidream-full"], f"HiDream-full: {r['prompt']}"))
421
+ return gr.update(value=gallery_items)
422
 
423
  # Add custom CSS for the big status box
424
  css = """