Spaces: Running on Zero

Update optimized.py

optimized.py CHANGED (+11 -9)
@@ -93,6 +93,7 @@ if getattr(pipe, "vae", None) is not None:
         pipe.vae.enable_slicing()
     except AttributeError:
         # Method 2: Apply manual slicing for Flux compatibility [source_id]pipeline_flux_controlnet.py
+        print("Falling back to manual attention slicing.")
         pipe.vae.decode = self_attention_slicing(pipe.vae.decode, 2)

 pipe.enable_attention_slicing(1)
@@ -106,7 +107,7 @@ pipe.enable_attention_slicing(1)

 print(f"VRAM used: {torch.cuda.memory_allocated()/1e9:.2f}GB")
 @spaces.GPU
-def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_scale, guidance_scale):
+def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_scale, guidance_scale, control_guidance_start, control_guidance_end):
     print(f"Memory Usage: {torch.cuda.memory_summary(device=None, abbreviated=False)}")

     # Load control image
@@ -123,13 +124,10 @@ def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_
         num_inference_steps=steps,
         guidance_scale=guidance_scale,
         height=h,
-        width=w
+        width=w,
+        control_guidance_start=control_guidance_start,
+        control_guidance_end=control_guidance_end
     ).images[0]
-    print(f"VRAM used: {torch.cuda.memory_allocated()/1e9:.2f}GB")
-    # Aggressive memory cleanup
-    # torch.cuda.empty_cache()
-    # torch.cuda.ipc_collect()
-    print(f"VRAM used: {torch.cuda.memory_allocated()/1e9:.2f}GB")
     return image
 # Create Gradio interface
 iface = gr.Interface(
@@ -139,8 +137,12 @@ iface = gr.Interface(
         gr.Slider(1, 3, value=1, label="Scale"),
         gr.Slider(2, 20, value=8, label="Steps"),
         gr.Image(type="pil", label="Control Image"),
-        gr.
-
+        with gr.Accordion(label="Settings (Advanced)"):
+            gr.Slider(0, 1, value=0.6, label="ControlNet Scale"),
+            gr.Slider(1, 20, value=3.5, label="Guidance Scale"),
+            with gr.Row():
+                gr.Slider(0, 1, value=0.0, label="Control Guidance Start"),
+                gr.Slider(0, 1, value=1.0, label="Control Guidance End"),
     ],
     outputs=[
         gr.Image(type="pil", label="Generated Image", format="png"),