"""Gradio app: stylize a QR code with Stable Diffusion + ControlNet (canny edges).

The user supplies a URL, a prompt/negative prompt, picks a base SD model and a few
sliders; the app renders the QR code, extracts canny edges, and runs a
ControlNet-guided txt2img pass conditioned on those edges. CPU-only via Accelerate.
"""
from diffusers import DPMSolverMultistepScheduler
import gradio as gr
from PIL import Image
import cv2
import qrcode
import numpy as np
from transformers import pipeline
import PIL.Image
from diffusers.utils import load_image
from accelerate import Accelerator
from diffusers import StableDiffusionPipeline
import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel

accelerator = Accelerator(cpu=True)

# Base Stable Diffusion checkpoints the user can choose from in the UI.
models = [
    "runwayml/stable-diffusion-v1-5",
    "prompthero/openjourney-v4",
    "CompVis/stable-diffusion-v1-4",
    "stabilityai/stable-diffusion-2-1",
    "stablediffusionapi/disney-pixal-cartoon",
    "stablediffusionapi/edge-of-realism",
    "MirageML/fantasy-scene",
    "wavymulder/lomo-diffusion",
    "sd-dreambooth-library/fashion",
    "DucHaiten/DucHaitenDreamWorld",
    "VegaKH/Ultraskin",
    "kandinsky-community/kandinsky-2-1",
    "MirageML/lowpoly-cyberpunk",
    "thehive/everyjourney-sdxl-0.9-finetuned",
    "plasmo/woolitize-768sd1-5",
    "plasmo/food-crit",
    "johnslegers/epic-diffusion-v1.1",
    "Fictiverse/ElRisitas",
    "robotjung/SemiRealMix",
    "herpritts/FFXIV-Style",
    "prompthero/linkedin-diffusion",
    "RayHell/popupBook-diffusion",
    "MirageML/lowpoly-world",
    "deadman44/SD_Photoreal_Merged_Models",
    "Conflictx/CGI_Animation",
    "johnslegers/epic-diffusion",
    "tilake/China-Chic-illustration",
    "wavymulder/modelshoot",
    "prompthero/openjourney-lora",
    "Fictiverse/Stable_Diffusion_VoxelArt_Model",
    "darkstorm2150/Protogen_v2.2_Official_Release",
    "hassanblend/HassanBlend1.5.1.2",
    "hassanblend/hassanblend1.4",
    "nitrosocke/redshift-diffusion",
    "prompthero/openjourney-v2",
    "nitrosocke/Arcane-Diffusion",
    "Lykon/DreamShaper",
    "wavymulder/Analog-Diffusion",
    "nitrosocke/mo-di-diffusion",
    "dreamlike-art/dreamlike-diffusion-1.0",
    "dreamlike-art/dreamlike-photoreal-2.0",
    "digiplay/RealismEngine_v1",
    "digiplay/AIGEN_v1.4_diffusers",
    "stablediffusionapi/dreamshaper-v6",
    "JackAnon/GorynichMix",
    "p1atdev/liminal-space-diffusion",
    "nadanainone/gigaschizonegs",
    "darkVOYAGE/dvMJv4",
    "lckidwell/album-cover-style",
    "axolotron/ice-cream-animals",
    "perion/ai-avatar",
    "digiplay/GhostMix",
    "ThePioneer/MISA",
    "TheLastBen/froggy-style-v21-768",
    "FloydianSound/Nixeu_Diffusion_v1-5",
    "kakaobrain/karlo-v1-alpha-image-variations",
    "digiplay/PotoPhotoRealism_v1",
    "ConsistentFactor/Aurora-By_Consistent_Factor",
    "rim0/quadruped_mechas",
    "Akumetsu971/SD_Samurai_Anime_Model",
    "Bojaxxx/Fantastic-Mr-Fox-Diffusion",
    "sd-dreambooth-library/original-character-cyclps",
    "AIArtsChannel/steampunk-diffusion",
]

# Canny-edge ControlNet shared across all base models; loaded once at startup.
controlnet = accelerator.prepare(
    ControlNetModel.from_pretrained(
        "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32
    )
)
# Fixed seed so repeated runs with the same inputs are reproducible.
generator = torch.Generator(device="cpu").manual_seed(1)


def plex(qr_code_value, text, neg_text, modil, one, two, three):
    """Render a stylized QR code.

    Args:
        qr_code_value: string/URL to encode into the QR code.
        text: positive prompt.
        neg_text: negative prompt (was previously ignored due to a NameError
            on the undefined `neg_prompt`; now actually used).
        modil: HF model id of the base Stable Diffusion checkpoint.
        one: num_inference_steps (int slider).
        two: prompt strength (float slider).
        three: controlnet_conditioning_scale (float slider).

    Returns:
        List of PIL images: the generated images, then the original QR code,
        then the canny edge map used as ControlNet conditioning.
    """
    apol = []
    # BUGFIX: the pointless `modal = "" + modil + ""` wrapper was removed.
    pipe = accelerator.prepare(
        StableDiffusionControlNetPipeline.from_pretrained(
            modil,
            controlnet=controlnet,
            torch_dtype=torch.float32,
            use_safetensors=False,
            safety_checker=None,
        )
    )
    pipe.scheduler = accelerator.prepare(
        DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    )
    pipe = pipe.to("cpu")

    # Render the QR code and normalize it to a 512x512 RGB PIL image.
    qr_code = qrcode.make(qr_code_value).resize((512, 512))
    rmage = load_image(qr_code)
    original = rmage.convert("RGB")
    original.thumbnail((512, 512))

    # Canny edge map of the QR code, replicated to 3 channels for ControlNet.
    cannyimage = load_image(original).resize((512, 512))
    cannyimage = np.array(cannyimage)
    low_threshold = 100
    high_threshold = 200
    cannyimage = cv2.Canny(cannyimage, low_threshold, high_threshold)
    cannyimage = cannyimage[:, :, None]
    cannyimage = np.concatenate([cannyimage, cannyimage, cannyimage], axis=2)
    cannyimage = Image.fromarray(cannyimage)

    # BUGFIX: prompt batch was 2 but only 1 conditioning image was supplied,
    # which diffusers rejects — duplicate the image to match the batch size.
    images = [cannyimage] * 2
    # BUGFIX: missing comma after `strength=two` (SyntaxError) and undefined
    # `neg_prompt` (NameError) — the UI's `neg_text` is used instead.
    # NOTE(review): the base ControlNet txt2img pipeline may not accept
    # `strength` (it belongs to img2img variants) — confirm against the
    # installed diffusers version.
    result = pipe(
        [text] * 2,
        images,
        num_inference_steps=one,
        generator=generator,
        strength=two,
        negative_prompt=[neg_text] * 2,
        controlnet_conditioning_scale=three,
    )
    # BUGFIX: loop previously read the undefined name `imoge`.
    for imge in result.images:
        apol.append(imge)
    apol.append(original)
    apol.append(cannyimage)
    return apol


iface = gr.Interface(
    fn=plex,
    inputs=[
        gr.Textbox(label="QR Code URL"),
        gr.Textbox(label="prompt"),
        gr.Textbox(label="neg prompt"),
        # BUGFIX: `gr.Dopdown` -> `gr.Dropdown`.
        gr.Dropdown(choices=models, label="some sd models", value=models[0]),
        gr.Slider(label="num inference steps", minimum=1, step=1, maximum=5, value=5),
        gr.Slider(label="prompt strength", minimum=0.1, step=0.1, maximum=1, value=0.2),
        gr.Slider(label="controlnet scale", minimum=0.1, step=0.1, maximum=1, value=0.8),
    ],
    outputs=gr.Gallery(label="out", columns=1),
)
# BUGFIX: Gradio's queue() takes `max_size`, not `max`.
iface.queue(max_size=1)
iface.launch()