import json
import os
import time
import uuid
import tempfile
import base64
import mimetypes
import concurrent.futures

from PIL import Image, ImageDraw, ImageFont
import gradio as gr
from google import genai
from google.genai import types


def save_binary_file(file_name, data):
    with open(file_name, "wb") as f:
        f.write(data)


def translate_prompt_to_english(text, api_key, model="gemini-2.0-flash-exp"):
    # Detect whether the text contains any non-ASCII (non-English) characters.
    is_english = all(ord(char) < 128 for char in text)
    if is_english:
        return text  # The text is already English; return it without translation.

    client = genai.Client(api_key=api_key.strip())
    pre_prompt = "Translate this to English:"
    full_text = pre_prompt + "\n" + text
    contents = [
        types.Content(
            role="user",
            parts=[
                types.Part.from_text(text=full_text),
            ],
        ),
    ]
    generate_content_config = types.GenerateContentConfig(
        temperature=0.5,
        top_p=0.9,
        top_k=40,
        max_output_tokens=8192,
        response_mime_type="text/plain",
    )

    text_response = ""
    for chunk in client.models.generate_content_stream(
        model=model,
        contents=contents,
        config=generate_content_config,
    ):
        if not chunk.candidates or not chunk.candidates[0].content or not chunk.candidates[0].content.parts:
            continue
        # Concatenate streamed chunks directly; inserting a newline between them
        # would break the translated prompt mid-sentence.
        text_response += chunk.text
    return text_response.strip()


def generate_with_api(api_key, text, file_name, model="gemini-2.0-flash-exp"):
    client = genai.Client(api_key=api_key.strip())
    files = [client.files.upload(file=file_name)]
    pre_prompt = "Apply these changes to the image:"
    full_text = pre_prompt + "\n" + text
    contents = [
        types.Content(
            role="user",
            parts=[
                types.Part.from_uri(
                    file_uri=files[0].uri,
                    mime_type=files[0].mime_type,
                ),
                types.Part.from_text(text=full_text),
            ],
        ),
    ]
    generate_content_config = types.GenerateContentConfig(
        temperature=1,
        top_p=0.95,
        top_k=40,
        max_output_tokens=8192,
        response_modalities=["image", "text"],
        response_mime_type="text/plain",
    )

    text_response = ""
    image_path = None
    # Reserve a temporary file path to receive the generated image.
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        temp_path = tmp.name

    for chunk in client.models.generate_content_stream(
        model=model,
        contents=contents,
        config=generate_content_config,
    ):
        if not chunk.candidates or not chunk.candidates[0].content or not chunk.candidates[0].content.parts:
            continue
        part = chunk.candidates[0].content.parts[0]
        if part.inline_data:
            # The model returned image bytes; save them and stop streaming.
            save_binary_file(temp_path, part.inline_data.data)
            image_path = temp_path
            break
        else:
            text_response += chunk.text + "\n"

    del files
    return image_path, text_response


def process_single_api(api_key, prompt, file_name, model):
    if not api_key:
        return None, "API key not provided"
    try:
        translated_prompt = translate_prompt_to_english(prompt, api_key, model)
        image_path, text_response = generate_with_api(api_key, translated_prompt, file_name, model)
        if image_path:
            result_img = Image.open(image_path)
            if result_img.mode == "RGBA":
                result_img = result_img.convert("RGB")
            return result_img, ""
        return None, text_response if text_response else "No image generated"
    except Exception as e:
        return None, f"Error with API {api_key[-4:]}: {str(e)}"


def process_image_and_prompt(composite_pil, prompt):
    try:
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
            composite_path = tmp.name
        composite_pil.save(composite_path)

        model = "gemini-2.0-flash-exp"
        api_keys = [
            os.environ.get("GEMINI_API_KEY_1"),
            os.environ.get("GEMINI_API_KEY_2"),
            os.environ.get("GEMINI_API_KEY_3"),
            os.environ.get("GEMINI_API_KEY_4"),
        ]

        result_images = []
        error_messages = []
        # Fan the same request out to every configured API key in parallel.
        with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
            futures = {
                executor.submit(
                    process_single_api, api_key, prompt, composite_path, model
                ): api_key
                for api_key in api_keys
            }
            for future in concurrent.futures.as_completed(futures):
                image, error = future.result()
                if image:
                    result_images.append(image)
                if error:
                    error_messages.append(error)

        os.unlink(composite_path)

        if not result_images:
            return None, "\n".join(error_messages) + (
                "\n\n**Note**: If no image was generated, please phrase your instruction "
                "more clearly or try again."
            )
        return result_images, ""
    except Exception as e:
        raise gr.Error(f"Processing error: {e}", duration=5)


css = """
footer {
    visibility: hidden;
}
[class="flagging"], [id*="flagging"], [class*="gradio-flagging"], [class*="svelte"][class*="flagging"] {
    display: none !important;
}
"""

with gr.Blocks(css_paths="style.css", css=css) as demo:
    gr.HTML(
        """