Spaces:
Running
Running
Johann.Haselberger (PEG-AS)
committed on
Commit
·
26fdb35
1
Parent(s):
a49d644
add round overlay
Browse files
app.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
import gradio as gr
|
2 |
import numpy as np
|
3 |
import cv2
|
4 |
-
from PIL import Image, ImageOps
|
5 |
import os
|
6 |
import torch
|
7 |
from transformers import AutoModelForImageSegmentation
|
@@ -11,7 +11,6 @@ import re
|
|
11 |
import urllib.request as urllib2
|
12 |
from loguru import logger
|
13 |
|
14 |
-
|
15 |
# Set up model and transformations
|
16 |
def get_background_removal_model():
|
17 |
try:
|
@@ -27,7 +26,6 @@ def get_background_removal_model():
|
|
27 |
print(f"Error loading background removal model: {e}")
|
28 |
return None, None
|
29 |
|
30 |
-
|
31 |
# Set up image transformation
|
32 |
transform_image = transforms.Compose(
|
33 |
[
|
@@ -40,7 +38,6 @@ transform_image = transforms.Compose(
|
|
40 |
# Cache for storing background removal results
|
41 |
bg_removal_cache = {}
|
42 |
|
43 |
-
|
44 |
def get_image_hash(image):
|
45 |
"""Generate a hash for an image to use as cache key"""
|
46 |
if image is None:
|
@@ -53,7 +50,6 @@ def get_image_hash(image):
|
|
53 |
# Include image dimensions in the hash to ensure uniqueness
|
54 |
return f"{img_hash}_{image.width}_{image.height}"
|
55 |
|
56 |
-
|
57 |
def remove_background(image, model_data):
|
58 |
if model_data[0] is None:
|
59 |
return None, None
|
@@ -103,7 +99,6 @@ def remove_background(image, model_data):
|
|
103 |
logger.error(f"Error during background removal: {e}")
|
104 |
return None, None
|
105 |
|
106 |
-
|
107 |
def parse_color(color_str):
|
108 |
"""Parse different color formats including rgba strings"""
|
109 |
if isinstance(color_str, tuple):
|
@@ -145,7 +140,6 @@ def parse_color(color_str):
|
|
145 |
# Default fallback
|
146 |
return (255, 255, 255, 255) # White
|
147 |
|
148 |
-
|
149 |
def add_person_border(image, mask, border_size, border_color="white"):
|
150 |
"""Add a border around the person based on the segmentation mask"""
|
151 |
if border_size == 0:
|
@@ -177,7 +171,6 @@ def add_person_border(image, mask, border_size, border_color="white"):
|
|
177 |
|
178 |
return result
|
179 |
|
180 |
-
|
181 |
def detect_face(image):
|
182 |
"""Detect the largest face in the image and return its bounding box"""
|
183 |
logger.info("Starting face detection")
|
@@ -211,7 +204,6 @@ def detect_face(image):
|
|
211 |
logger.info(f"Largest face detected at: {largest_face}")
|
212 |
return largest_face
|
213 |
|
214 |
-
|
215 |
def center_portrait(portrait, face_box, target_width, target_height, zoom_level=1.0):
|
216 |
"""Center the portrait based on face position and crop to avoid blurriness"""
|
217 |
if face_box is None:
|
@@ -249,9 +241,8 @@ def center_portrait(portrait, face_box, target_width, target_height, zoom_level=
|
|
249 |
|
250 |
return centered_img, (offset_x, offset_y)
|
251 |
|
252 |
-
|
253 |
def process_portrait(
|
254 |
-
input_image, border_size=10, bg_color="#0000FF", zoom_level=1.0, erode_size=5
|
255 |
):
|
256 |
if input_image is None:
|
257 |
return None
|
@@ -327,12 +318,20 @@ def process_portrait(
|
|
327 |
bottom = top + square_size
|
328 |
final_image = final_image.crop((left, top, right, bottom))
|
329 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
330 |
logger.info(
|
331 |
f"Processing complete (portrait offset by {offset}, zoom: {zoom_level})"
|
332 |
)
|
333 |
return final_image
|
334 |
|
335 |
-
|
336 |
# Create Gradio interface
|
337 |
with gr.Blocks(title="Cool Avatar Creator") as app:
|
338 |
gr.Markdown("# Cool Avatar Creator")
|
@@ -353,6 +352,7 @@ with gr.Blocks(title="Cool Avatar Creator") as app:
|
|
353 |
erode_slider = gr.Slider(
|
354 |
minimum=1, maximum=30, value=15, step=1, label="Erode Size"
|
355 |
)
|
|
|
356 |
process_button = gr.Button("Process Image")
|
357 |
|
358 |
with gr.Column():
|
@@ -384,7 +384,7 @@ with gr.Blocks(title="Cool Avatar Creator") as app:
|
|
384 |
|
385 |
process_button.click(
|
386 |
fn=process_portrait,
|
387 |
-
inputs=[input_image, border_slider, bg_color, zoom_slider, erode_slider],
|
388 |
outputs=output_image,
|
389 |
)
|
390 |
|
|
|
1 |
import gradio as gr
|
2 |
import numpy as np
|
3 |
import cv2
|
4 |
+
from PIL import Image, ImageOps, ImageDraw
|
5 |
import os
|
6 |
import torch
|
7 |
from transformers import AutoModelForImageSegmentation
|
|
|
11 |
import urllib.request as urllib2
|
12 |
from loguru import logger
|
13 |
|
|
|
14 |
# Set up model and transformations
|
15 |
def get_background_removal_model():
|
16 |
try:
|
|
|
26 |
print(f"Error loading background removal model: {e}")
|
27 |
return None, None
|
28 |
|
|
|
29 |
# Set up image transformation
|
30 |
transform_image = transforms.Compose(
|
31 |
[
|
|
|
38 |
# Cache for storing background removal results
|
39 |
bg_removal_cache = {}
|
40 |
|
|
|
41 |
def get_image_hash(image):
|
42 |
"""Generate a hash for an image to use as cache key"""
|
43 |
if image is None:
|
|
|
50 |
# Include image dimensions in the hash to ensure uniqueness
|
51 |
return f"{img_hash}_{image.width}_{image.height}"
|
52 |
|
|
|
53 |
def remove_background(image, model_data):
|
54 |
if model_data[0] is None:
|
55 |
return None, None
|
|
|
99 |
logger.error(f"Error during background removal: {e}")
|
100 |
return None, None
|
101 |
|
|
|
102 |
def parse_color(color_str):
|
103 |
"""Parse different color formats including rgba strings"""
|
104 |
if isinstance(color_str, tuple):
|
|
|
140 |
# Default fallback
|
141 |
return (255, 255, 255, 255) # White
|
142 |
|
|
|
143 |
def add_person_border(image, mask, border_size, border_color="white"):
|
144 |
"""Add a border around the person based on the segmentation mask"""
|
145 |
if border_size == 0:
|
|
|
171 |
|
172 |
return result
|
173 |
|
|
|
174 |
def detect_face(image):
|
175 |
"""Detect the largest face in the image and return its bounding box"""
|
176 |
logger.info("Starting face detection")
|
|
|
204 |
logger.info(f"Largest face detected at: {largest_face}")
|
205 |
return largest_face
|
206 |
|
|
|
207 |
def center_portrait(portrait, face_box, target_width, target_height, zoom_level=1.0):
|
208 |
"""Center the portrait based on face position and crop to avoid blurriness"""
|
209 |
if face_box is None:
|
|
|
241 |
|
242 |
return centered_img, (offset_x, offset_y)
|
243 |
|
|
|
244 |
def process_portrait(
|
245 |
+
input_image, border_size=10, bg_color="#0000FF", zoom_level=1.0, erode_size=5, circular_overlay=False
|
246 |
):
|
247 |
if input_image is None:
|
248 |
return None
|
|
|
318 |
bottom = top + square_size
|
319 |
final_image = final_image.crop((left, top, right, bottom))
|
320 |
|
321 |
+
if circular_overlay:
|
322 |
+
# Create a circular mask
|
323 |
+
mask = Image.new("L", (square_size, square_size), 0)
|
324 |
+
draw = ImageDraw.Draw(mask)
|
325 |
+
draw.ellipse((0, 0, square_size, square_size), fill=255)
|
326 |
+
|
327 |
+
# Apply the circular mask to the final image
|
328 |
+
final_image.putalpha(mask)
|
329 |
+
|
330 |
logger.info(
|
331 |
f"Processing complete (portrait offset by {offset}, zoom: {zoom_level})"
|
332 |
)
|
333 |
return final_image
|
334 |
|
|
|
335 |
# Create Gradio interface
|
336 |
with gr.Blocks(title="Cool Avatar Creator") as app:
|
337 |
gr.Markdown("# Cool Avatar Creator")
|
|
|
352 |
erode_slider = gr.Slider(
|
353 |
minimum=1, maximum=30, value=15, step=1, label="Erode Size"
|
354 |
)
|
355 |
+
circular_overlay_toggle = gr.Checkbox(label="Enable Circular Overlay")
|
356 |
process_button = gr.Button("Process Image")
|
357 |
|
358 |
with gr.Column():
|
|
|
384 |
|
385 |
process_button.click(
|
386 |
fn=process_portrait,
|
387 |
+
inputs=[input_image, border_slider, bg_color, zoom_slider, erode_slider, circular_overlay_toggle],
|
388 |
outputs=output_image,
|
389 |
)
|
390 |
|