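"""Gradio demo: Gaussian background blur and depth-based lens blur.

Segments the uploaded image with SegFormer to keep the foreground sharp over a
Gaussian-blurred background, and uses DPT (MiDaS) depth estimation to apply a
depth-dependent lens blur.
"""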
import gradio as gr
import numpy as np
import torch
import cv2
from PIL import Image, ImageFilter, ImageOps
from transformers import (
    SegformerFeatureExtractor,
    SegformerForSemanticSegmentation,
    DPTFeatureExtractor,
    DPTForDepthEstimation,
)

# Semantic segmentation model (SegFormer fine-tuned on ADE20K),
# used to separate the foreground from the background.
seg_model_name = "nvidia/segformer-b1-finetuned-ade-512-512"
seg_feature_extractor = SegformerFeatureExtractor.from_pretrained(seg_model_name)
seg_model = SegformerForSemanticSegmentation.from_pretrained(seg_model_name)

# Monocular depth estimation model (DPT hybrid MiDaS),
# used to drive the depth-dependent lens blur.
depth_model_name = "Intel/dpt-hybrid-midas"
depth_feature_extractor = DPTFeatureExtractor.from_pretrained(depth_model_name)
depth_model = DPTForDepthEstimation.from_pretrained(depth_model_name)

# Run on GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
seg_model.to(device)
depth_model.to(device)
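
# from_pretrained() already returns models in eval mode, but being explicit
# guards against accidentally running inference with dropout enabled.
seg_model.eval()
depth_model.eval()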


def process_image(image):
    # Normalize orientation from EXIF data, force RGB, and work at a
    # fixed 512x512 resolution.
    image = ImageOps.exif_transpose(image).convert("RGB").resize((512, 512))

    # --- Foreground/background segmentation ---
    inputs = seg_feature_extractor(images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = seg_model(**inputs)
    logits = outputs.logits
    segmentation = torch.argmax(logits, dim=1)[0].cpu().numpy()
    # Treat every pixel not assigned to class 0 as foreground.
    binary_mask = np.where(segmentation > 0, 255, 0).astype(np.uint8)

    # Gaussian-blur the whole image, then composite the sharp foreground
    # back on top using the segmentation mask.
    blurred_background = image.filter(ImageFilter.GaussianBlur(15))
    foreground = Image.fromarray(binary_mask).convert("L").resize(image.size)
    output_blur = Image.composite(image, blurred_background, foreground)

    # --- Depth estimation ---
    depth_inputs = depth_feature_extractor(images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        depth_outputs = depth_model(**depth_inputs)
    predicted_depth = depth_outputs.predicted_depth.squeeze().cpu().numpy()

    # Normalize the depth prediction to [0, 1] (epsilon avoids division by
    # zero on flat maps) and resize it to the image resolution.
    depth_min, depth_max = predicted_depth.min(), predicted_depth.max()
    normalized_depth = (predicted_depth - depth_min) / (depth_max - depth_min + 1e-8)
    normalized_depth_resized = cv2.resize(normalized_depth, (512, 512))

    # --- Depth-based lens blur ---
    # Blur radius scales with the normalized depth prediction; each pixel is
    # re-sampled from a locally blurred patch of the original image.
    # This per-pixel loop is simple but slow.
    blurred_image = np.array(image).astype(np.float32)
    blur_intensity = normalized_depth_resized * 20
    for y in range(image.size[1]):
        for x in range(image.size[0]):
            sigma = blur_intensity[y, x]
            kernel_size = int(2 * sigma + 1)
            if kernel_size > 1:
                patch = image.crop((x - kernel_size // 2, y - kernel_size // 2,
                                    x + kernel_size // 2 + 1, y + kernel_size // 2 + 1))
                patch = patch.filter(ImageFilter.GaussianBlur(sigma))
                blurred_image[y, x, :] = np.array(patch)[kernel_size // 2, kernel_size // 2, :]
    lens_blur_image = Image.fromarray(np.clip(blurred_image, 0, 255).astype(np.uint8))

    return image, output_blur, lens_blur_image
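

# Optional: a much faster, approximate alternative to the per-pixel loop above.
# This is only a sketch and is not wired into the Gradio app: it pre-blurs the
# image at a few fixed sigmas and picks, per pixel, the layer matching its
# normalized depth. `num_layers` and `max_sigma` are illustrative choices.
def lens_blur_layered(image, normalized_depth, num_layers=5, max_sigma=20):
    sigmas = np.linspace(0, max_sigma, num_layers)
    # Stack of progressively blurred copies of the input image.
    layers = [
        np.array(image.filter(ImageFilter.GaussianBlur(float(s))), dtype=np.float32)
        for s in sigmas
    ]
    # Map each pixel's normalized depth to the nearest blur layer.
    indices = np.clip(
        np.round(normalized_depth * (num_layers - 1)).astype(int), 0, num_layers - 1
    )
    out = np.zeros_like(layers[0])
    for i, layer in enumerate(layers):
        mask = indices == i
        out[mask] = layer[mask]
    return Image.fromarray(np.clip(out, 0, 255).astype(np.uint8))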


# Gradio interface: original image, Gaussian blur with a sharp foreground,
# and the depth-based lens blur.
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil", label="Upload an Image"),
    outputs=[
        gr.Image(label="Original Image"),
        gr.Image(label="Gaussian Blur Effect"),
        gr.Image(label="Depth-Based Lens Blur Effect"),
    ],
    title="Image Blurring with Gaussian and Depth-Based Lens Blur",
    description="Upload an image to see Gaussian blur and depth-based lens blur effects.",
)

iface.launch()