# app.py: ID-Booth Gradio demo, Hugging Face Space running on ZeroGPU
import gradio as gr
import numpy as np
import random
import spaces  # needed for ZeroGPU Spaces; provides the @spaces.GPU decorator
import os
from diffusers import StableDiffusionPipeline, DDPMScheduler
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
model_repo_id = "stabilityai/stable-diffusion-2-1-base"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
# Load the Stable Diffusion 2.1-base pipeline (float16 on GPU, float32 on CPU)
pipe = StableDiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype).to(device)
pipe.scheduler = DDPMScheduler.from_pretrained(model_repo_id, subfolder="scheduler")
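# Per-identity ID-Booth LoRA adapters are expected under
# ./ID-Booth_LoRA_weights/<identity>/<checkpoint>/pytorch_lora_weights.safetensors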
folder_of_lora_weights = "./ID-Booth_LoRA_weights"
which_checkpoint = "checkpoint-31-6400"
lora_name = "pytorch_lora_weights.safetensors"
folder_of_identity_images = "./assets/example_images/"
backgrounds_list = ["Forest", "City street", "Beach", "Office", "Bus", "Laboratory", "Factory", "Construction site", "Hospital", "Night club", ""]
poses_list = ["Portrait", "Side-portrait"]
id_list = ["ID_1", "ID_5", "ID_16", "ID_20"]
gender_dict = {"ID_1": "male", "ID_5": "male", "ID_16": "female", "ID_20": "male"}
MAX_SEED = 10000
image_size = 512
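# Seeds are capped at MAX_SEED; images are generated at 512x512, the native resolution of SD 2.1-base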
@spaces.GPU  # request a ZeroGPU GPU for the duration of each call
def infer(
identity,
background,
pose,
negative_prompt,
seed,
randomize_seed,
guidance_scale,
num_inference_steps,
num_images=1
):
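    """Generate `num_images` face images of the selected identity with its ID-Booth LoRA adapter.

    The text prompt is built from the chosen pose and background; when `randomize_seed`
    is set, the `seed` value is replaced by a random one.
    """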
    full_lora_weights_path = f"{folder_of_lora_weights}/{identity}/{which_checkpoint}/{lora_name}"
    # Unload any previously loaded adapter before loading the LoRA weights of the selected identity
    pipe.unload_lora_weights()
    pipe.load_lora_weights(full_lora_weights_path)
if randomize_seed:
seed = random.randint(0, MAX_SEED)
generator = torch.Generator().manual_seed(seed)
gender = gender_dict[identity]
# Construct prompt from dropdown selections
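    # "sks" is the rare-token identifier the identity-specific LoRA weights were fine-tuned on (DreamBooth-style)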
prompt = f"face {pose.lower()} photo of {gender} sks person, {background.lower()} background"
images = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
width=image_size,
height=image_size,
generator=generator,
num_images_per_prompt=num_images,
).images
return images
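# Illustrative direct call, bypassing the Gradio UI (argument values are examples only):
#   images = infer("ID_16", "Beach", "Portrait",
#                  negative_prompt="cartoon, cgi, render", seed=42, randomize_seed=False,
#                  guidance_scale=5.0, num_inference_steps=30, num_images=1)
#   images[0].save("sample.png")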
### Description
header = "# ID-Booth: Identity-consistent Face Generation with Diffusion Models"
description = "This is an official Gradio demo for the paper <a href='https://dariant.github.io/publications/ID-Booth' target='_blank'>ID-Booth: Identity-consistent Face Generation with Diffusion Models</a>"
footer = r"""
**Citation**
<br>
If you find ID-Booth helpful, please consider citing our paper:
```bibtex
@article{tomasevic2025IDBooth,
title={{ID-Booth}: Identity-consistent Face Generation with Diffusion Models},
author={Toma{\v{s}}evi{\'c}, Darian and Boutros, Fadi and Lin, Chenhao and Damer, Naser and {\v{S}}truc, Vitomir and Peer, Peter},
journal={arXiv preprint arXiv:2504.07392},
year={2025}
}
```
"""
css = '''
.gradio-container {
width: 75%;
margin: auto;
}
'''
with gr.Blocks(css=css) as demo:
# description
gr.Markdown(header)
gr.Markdown(description)
with gr.Column():
# with gr.Row():
# gr.Markdown("### Choose an identity, background, and pose:")
with gr.Row():
for id in id_list:
image_path = os.path.join(folder_of_identity_images, id + ".jpg")
img = gr.Image(value=image_path, label=id,
width=256, height=256,
show_label=True, interactive=False,
show_download_button=False,
show_fullscreen_button=False,
show_share_button=False,
)
with gr.Row():
identity = gr.Dropdown(
label="Identity:",
choices=id_list,
value=id_list[2],
)
background = gr.Dropdown(
label="Background:",
choices=backgrounds_list,
value=backgrounds_list[1],
)
pose = gr.Dropdown(
label="Pose:",
choices=poses_list,
value=poses_list[0],
)
run_button = gr.Button("Generate in-the-wild images", scale=0, variant="primary")
#result = gr.Image(label="Result", show_label=False)
result = gr.Gallery(label="Generated Images", show_label=False)
with gr.Accordion(open=False, label="Advanced Options"):
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=1,
placeholder="Enter a negative prompt",
value="cartoon, cgi, render, illustration, painting, drawing, black and white, bad body proportions, landscape",
)
num_inference_steps = gr.Slider(
label="Number of sampling steps",
minimum=1,
maximum=100,
step=1,
value=30,
)
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=0.1,
maximum=10.0,
step=0.1,
value=5.0,
)
num_images = gr.Slider(
label="Number of output images",
minimum=1,
maximum=4,
step=1,
value=2,
)
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
# gr.Examples(
# examples=[
# [id_list[0], backgrounds_list[0], poses_list[0], "A beautiful photo of a person", 0, False, 512, 512, 7.5, 50],
# ],
# inputs=[selected_identity, background, pose],
# )
gr.on(
triggers=[run_button.click],
fn=infer,
inputs=[
identity,
background,
pose,
negative_prompt,
seed,
randomize_seed,
guidance_scale,
num_inference_steps,
num_images
],
outputs=[result],
)
gr.Markdown(footer)
if __name__ == "__main__":
demo.launch()