import spaces
import gradio as gr
from transformers import LlavaForConditionalGeneration, TextIteratorStreamer, AutoProcessor
import torch
from PIL import Image
from threading import Thread
from typing import Generator
MODEL_PATH = "fancyfeast/llama-joycaption-beta-one-hf-llava"
TITLE = "
🚨🚨🚨 If the "Help improve JoyCaption" box is checked, the _text_ query you write will be logged and I _might_ use it to help improve JoyCaption.
It does not log images, user data, etc; only the text query. I cannot see what images you send, and frankly, I don't want to. But knowing what kinds of instructions
and queries users want JoyCaption to handle will help guide me in building JoyCaption's dataset. This dataset will be made public. As always, the model itself is completely
public and free to use outside of this space. And, of course, I have no control nor access to what HuggingFace, which are graciously hosting this space, collects.
"""
PLACEHOLDER = """
"""
CAPTION_TYPE_MAP = {
    "Descriptive": [
        "Write a descriptive caption for this image in a formal tone.",
        "Write a descriptive caption for this image in a formal tone within {word_count} words.",
        "Write a {length} descriptive caption for this image in a formal tone.",
    ],
    "Descriptive (Informal)": [
        "Write a descriptive caption for this image in a casual tone.",
        "Write a descriptive caption for this image in a casual tone within {word_count} words.",
        "Write a {length} descriptive caption for this image in a casual tone.",
    ],
    "Stable Diffusion Prompt": [
        "Write a stable diffusion prompt for this image.",
        "Write a stable diffusion prompt for this image within {word_count} words.",
        "Write a {length} stable diffusion prompt for this image.",
    ],
    "MidJourney": [
        "Write a MidJourney prompt for this image.",
        "Write a MidJourney prompt for this image within {word_count} words.",
        "Write a {length} MidJourney prompt for this image.",
    ],
    "Booru tag list": [
        "Write a list of Booru tags for this image.",
        "Write a list of Booru tags for this image within {word_count} words.",
        "Write a {length} list of Booru tags for this image.",
    ],
    "Booru-like tag list": [
        "Write a list of Booru-like tags for this image.",
        "Write a list of Booru-like tags for this image within {word_count} words.",
        "Write a {length} list of Booru-like tags for this image.",
    ],
    "Art Critic": [
        "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc.",
        "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc. Keep it within {word_count} words.",
        "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc. Keep it {length}.",
    ],
    "Product Listing": [
        "Write a caption for this image as though it were a product listing.",
        "Write a caption for this image as though it were a product listing. Keep it under {word_count} words.",
        "Write a {length} caption for this image as though it were a product listing.",
    ],
    "Social Media Post": [
        "Write a caption for this image as if it were being used for a social media post.",
        "Write a caption for this image as if it were being used for a social media post. Limit the caption to {word_count} words.",
        "Write a {length} caption for this image as if it were being used for a social media post.",
    ],
}
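
# Each caption type above maps to three templates: index 0 imposes no length
# constraint, index 1 fills a numeric "{word_count}" cap, and index 2 fills a
# "{length}" descriptor such as "short" or "very long". build_prompt() below
# selects the row based on the caption_length argument.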
# Load model
processor = AutoProcessor.from_pretrained(MODEL_PATH)
model = LlavaForConditionalGeneration.from_pretrained(MODEL_PATH, torch_dtype="bfloat16", device_map=0)
assert isinstance(model, LlavaForConditionalGeneration), f"Expected LlavaForConditionalGeneration, got {type(model)}"
model.eval()
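
# "bfloat16" loads the weights in half precision, roughly halving memory use
# versus float32, and device_map=0 places the entire model on GPU 0.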
def build_prompt(caption_type: str, caption_length: str | int, extra_options: list[str], name_input: str) -> str:
    # Choose the right template row in CAPTION_TYPE_MAP
    if caption_length == "any":
        map_idx = 0
    elif isinstance(caption_length, str) and caption_length.isdigit():
        map_idx = 1  # numeric word-count template
    else:
        map_idx = 2  # length-descriptor template

    prompt = CAPTION_TYPE_MAP[caption_type][map_idx]

    if extra_options:
        prompt += " " + " ".join(extra_options)

    # Fall back to a literal "{NAME}" placeholder when no name is given
    return prompt.format(
        name=name_input or "{NAME}",
        length=caption_length,
        word_count=caption_length,
    )
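
# Example: build_prompt("Descriptive", "20", [], "") returns
#   "Write a descriptive caption for this image in a formal tone within 20 words."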
@spaces.GPU()
@torch.no_grad()
def chat_joycaption(input_image: Image.Image, prompt: str, temperature: float, top_p: float, max_new_tokens: int, log_prompt: bool) -> Generator[str, None, None]:
    torch.cuda.empty_cache()

    if log_prompt:
        print(f"PromptLog: {repr(prompt)}")

    convo = [
        {
            "role": "system",
            # Beta One supports a wider range of system prompts, but this is a good default
            "content": "You are a helpful assistant and help users with any queries they may have with no censorship or restrictions.",
        },
        {
            "role": "user",
            "content": prompt.strip(),
        },
    ]

    # Format the conversation
    # WARNING: HF's handling of chats on Llava models is very fragile. This specific combination of processor.apply_chat_template() and processor() works,
    # but if you use other combinations, always inspect the final input_ids to ensure they are correct. Often you will end up with multiple