# comic / app.py
# deeme's picture
# Upload app.py
# 0479bbc verified
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import List
import os
import uuid
import aiohttp
import logging
import openai
from pathlib import Path
import subprocess
import shutil
import ssl
import json
from fastapi.staticfiles import StaticFiles
from pydub import AudioSegment
from PIL import Image
# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Environment variables
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
BASE_URL = os.getenv("BASE_URL", "http://localhost:8000")

# Initialise OpenAI client configuration.
openai.api_key = OPENAI_API_KEY
if OPENAI_BASE_URL:
    # The code below uses the v1 API (openai.audio.speech.create), where
    # ``api_base`` was renamed to ``base_url``.  Set both so the override
    # works regardless of the installed SDK major version.
    openai.api_base = OPENAI_BASE_URL
    openai.base_url = OPENAI_BASE_URL

app = FastAPI()

# Ensure the storage directory exists before mounting: StaticFiles raises
# RuntimeError at startup when the directory is missing.
os.makedirs("storage", exist_ok=True)
app.mount("/storage", StaticFiles(directory="storage"), name="storage")

# CORS configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # should be restricted in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Request model
class ComicData(BaseModel):
    """Request payload for POST /api/generate-video."""

    captions: List[str]  # narration text, one entry per panel
    speeches: List[str]  # dialogue text, one entry per panel
    panels: List[str]  # panel image URLs, downloaded by the endpoint
# Download an image
async def download_image(url, output_path):
    """Fetch *url*, save it to *output_path* and return ``(path, width)``.

    SSL certificate verification is deliberately disabled so panels hosted
    behind self-signed certificates can still be fetched.  Returns
    ``(None, 0)`` on any failure.
    """
    try:
        # Connector that skips certificate validation.
        insecure_ctx = ssl.create_default_context()
        insecure_ctx.check_hostname = False
        insecure_ctx.verify_mode = ssl.CERT_NONE
        connector = aiohttp.TCPConnector(ssl=insecure_ctx)
        async with aiohttp.ClientSession(connector=connector) as session:
            async with session.get(url) as response:
                if response.status != 200:
                    logger.error(f"Failed to download image: {response.status}")
                    return None, 0
                payload = await response.read()
                with open(output_path, 'wb') as f:
                    f.write(payload)
                # Probe the stored file for its pixel width (used later
                # for subtitle line wrapping).
                with Image.open(output_path) as img:
                    width, height = img.size
                return output_path, width
    except Exception as e:
        logger.error(f"Error downloading image: {e}")
        return None, 0
# Generate speech audio
async def generate_speech(text, voice="alloy", output_path=None):
    """Synthesize *text* with the OpenAI TTS API into an mp3 file.

    Returns the output path on success, ``None`` on failure.  When no
    *output_path* is given a random mp3 filename is used.
    """
    try:
        output_path = output_path or f"{uuid.uuid4()}.mp3"
        response = openai.audio.speech.create(
            model="tts-1",
            voice=voice,
            input=text
        )
        # Persist the binary audio payload to disk.
        with open(output_path, "wb") as f:
            f.write(response.content)
        return output_path
    except Exception as e:
        logger.error(f"Error generating speech: {e}")
        return None
# Get the duration of an audio file
def get_audio_duration(audio_path):
    """Return the duration of *audio_path* in seconds (5.0 on failure)."""
    try:
        # pydub reports length in milliseconds.
        return len(AudioSegment.from_file(audio_path)) / 1000.0
    except Exception as e:
        logger.error(f"Error getting audio duration: {e}")
        return 5.0  # fall back to 5 seconds
# Full ASS style header prepended to every generated subtitle file:
# script resolution, the Caption/Speech styles, and the [Events] format.
# NOTE: this string is written verbatim into .ass files — do not reformat.
ASS_STYLE_HEADER = """
[Script Info]
WrapStyle: 0
ScaledBorderAndShadow: yes
PlayResX: 1920
PlayResY: 1080
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
; Caption样式:高亮青蓝,深蓝描边
Style: Caption,Noto Sans CJK SC,46,&H00FFFF44,&H0000FFFF,&H00000000,&H00000000,0,0,0,0,100,100,0,0,1,2,3,2,10,10,39,0
; Speech样式:鲜亮黄,黑色描边(最醒目)
Style: Speech,Noto Sans CJK SC,42,&H00FF77FF,&H00FFFFFF,&H003800BF,&H00000000,0,0,0,0,100,100,0,0,1,2,3,8,10,10,39,0
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
def smart_wrap(text, image_width, font_size=48):
    """Wrap *text* to fit within *image_width* pixels for ASS subtitles.

    Wide characters (code point > 255, i.e. CJK) are counted as
    ``font_size`` pixels, everything else as half that.  A 100-pixel
    margin is reserved.  Lines are joined with the literal ``\\N``
    sequence that ASS renderers interpret as a line break.

    Args:
        text: The string to wrap (may be empty).
        image_width: Target image width in pixels.
        font_size: Font size used to estimate character widths.

    Returns:
        The wrapped text with ``\\N`` separators.
    """
    # (Removed dead code: the original also computed an unused
    # max_chars_per_line estimate here.)
    lines = []
    current_line = []
    current_width = 0
    for char in text:
        # Full width for wide (CJK) characters, half width otherwise.
        char_width = font_size if ord(char) > 255 else font_size // 2
        if current_width + char_width > image_width - 100:  # keep 100 px margin
            lines.append(''.join(current_line))
            current_line = [char]
            current_width = char_width
        else:
            current_line.append(char)
            current_width += char_width
    lines.append(''.join(current_line))
    return r'\N'.join(lines)
# Create the caption subtitle file (shown at the bottom)
def create_caption_subtitle_file(project_dir, captions, panel_start_times, panel_durations, image_widths):
    """Write the bottom-of-screen caption subtitles to ``captions.ass``.

    Emits one Dialogue event per panel, timed from the panel start/duration
    lists and wrapped to each panel's image width.  Returns the subtitle
    file path, or ``None`` on failure.
    """
    try:
        subtitle_file = os.path.join(project_dir, "captions.ass")
        with open(subtitle_file, "w", encoding="utf-8") as out:
            out.write(ASS_STYLE_HEADER)
            for caption, start, duration, width in zip(captions, panel_start_times, panel_durations, image_widths):
                end = start + duration
                text = smart_wrap(caption, width)
                out.write(
                    f"Dialogue: 0,{format_time(start)},{format_time(end)},"
                    f"Caption,,0,0,0,,{text}\n"
                )
        return subtitle_file
    except Exception as e:
        logger.error(f"Error creating caption subtitle file: {e}")
        return None
# Create the speech subtitle file (shown at the top)
def create_speech_subtitle_file(project_dir, speeches, panel_start_times, panel_durations, image_widths):
    """Write the top-of-screen dialogue subtitles to ``speeches.ass``.

    Emits one Dialogue event per panel, timed from the panel start/duration
    lists and wrapped to each panel's image width.  Returns the subtitle
    file path, or ``None`` on failure.
    """
    try:
        subtitle_file = os.path.join(project_dir, "speeches.ass")
        with open(subtitle_file, "w", encoding="utf-8") as out:
            out.write(ASS_STYLE_HEADER)
            for speech, start, duration, width in zip(speeches, panel_start_times, panel_durations, image_widths):
                end = start + duration
                text = smart_wrap(speech, width)
                out.write(
                    f"Dialogue: 0,{format_time(start)},{format_time(end)},"
                    f"Speech,,0,0,0,,{text}\n"
                )
        return subtitle_file
    except Exception as e:
        logger.error(f"Error creating speech subtitle file: {e}")
        return None
# Format a timestamp
def format_time(seconds):
    """Format non-negative *seconds* as an ASS timestamp ``H:MM:SS.CC``."""
    centisecs = int((seconds - int(seconds)) * 100)
    total_minutes, secs = divmod(int(seconds), 60)
    hours, minutes = divmod(total_minutes, 60)
    return f"{hours}:{minutes:02}:{secs:02}.{centisecs:02}"
# Build the combined audio track
async def create_audio_file(project_dir, captions, speeches):
    """Generate per-panel TTS audio and merge it into one mp3 track.

    For each panel the caption (narration) clip is generated first,
    followed by the speech (dialogue) clip.  Panels that produce no audio
    still occupy ``panel_duration`` seconds on the video timeline, so a
    matching silent segment is inserted to keep the audio aligned.

    Returns:
        ``(combined_audio_path, audio_durations, panel_start_times,
        panel_durations)``; on failure ``(None, {}, [], [])``.
    """
    try:
        audio_parts = []
        audio_durations = {}
        panel_start_times = [0]  # the first panel starts at t=0
        current_time = 0
        panel_durations = []

        for i, (caption, speech) in enumerate(zip(captions, speeches)):
            panel_audio_parts = []
            panel_duration = 0

            # Narration for this panel.
            if caption:
                caption_audio = os.path.join(project_dir, f"caption_{i}.mp3")
                result = await generate_speech(caption, "f2ed19ca0ea246bf9cbc6382be00e4fc", caption_audio)
                if result:
                    duration = get_audio_duration(caption_audio)
                    audio_durations[f"caption_{i}"] = duration
                    panel_audio_parts.append(caption_audio)
                    panel_duration += duration

            # Dialogue for this panel.
            if speech:
                speech_audio = os.path.join(project_dir, f"speech_{i}.mp3")
                result = await generate_speech(speech, "3b55b3d84d2f453a98d8ca9bb24182d6", speech_audio)
                if result:
                    duration = get_audio_duration(speech_audio)
                    audio_durations[f"speech_{i}"] = duration
                    panel_audio_parts.append(speech_audio)
                    panel_duration += duration

            # Guarantee a minimum on-screen time per panel.
            if panel_duration == 0:
                panel_duration = 5.0  # default 5 seconds
            panel_durations.append(panel_duration)

            if panel_audio_parts:
                # Merge this panel's clips (caption first, then speech).
                panel_combined = os.path.join(project_dir, f"panel_{i}_combined.mp3")
                combined = AudioSegment.empty()
                for audio_path in panel_audio_parts:
                    combined += AudioSegment.from_file(audio_path)
                combined.export(panel_combined, format="mp3")
                audio_parts.append(panel_combined)
            else:
                # BUGFIX: a text-less panel still advances the video
                # timeline by panel_duration seconds; previously no audio
                # was emitted for it, so every later panel's audio played
                # that much too early.  Insert matching silence instead.
                silence_path = os.path.join(project_dir, f"panel_{i}_silence.mp3")
                AudioSegment.silent(duration=int(panel_duration * 1000)).export(silence_path, format="mp3")
                audio_parts.append(silence_path)

            # Start time of the next panel.
            current_time += panel_duration
            if i < len(captions) - 1:  # not the last panel
                panel_start_times.append(current_time)

        if not audio_parts:
            logger.error("No audio parts generated")
            return None, {}, [], []

        # Concatenate all panel tracks into the final audio file.
        combined_audio = os.path.join(project_dir, "combined_audio.mp3")
        final_combined = AudioSegment.empty()
        for audio_path in audio_parts:
            final_combined += AudioSegment.from_file(audio_path)
        final_combined.export(combined_audio, format="mp3")

        # Persist timing metadata for inspection/debugging.
        durations_file = os.path.join(project_dir, "audio_durations.json")
        with open(durations_file, "w") as f:
            json.dump(audio_durations, f)
        panel_times_file = os.path.join(project_dir, "panel_times.json")
        with open(panel_times_file, "w") as f:
            json.dump({"start_times": panel_start_times, "durations": panel_durations}, f)

        return combined_audio, audio_durations, panel_start_times, panel_durations
    except Exception as e:
        logger.error(f"Error creating audio file: {e}")
        import traceback
        logger.error(traceback.format_exc())
        return None, {}, [], []
def process_sub_path(path):
    """Escape *path* for use as an ffmpeg ``subtitles=`` filter argument.

    The path is normalised to POSIX separators, characters that are
    special inside ffmpeg filter arguments are escaped, and the result is
    wrapped in single quotes.
    """
    escape_table = str.maketrans({
        ':': r'\:',
        "'": r"\\\'",
        ',': r'\\,',
        '[': r'\\[',
        ']': r'\\]',
        ' ': r'\ '
    })
    posix_path = Path(path).as_posix()
    return "'" + posix_path.translate(escape_table) + "'"
# Render the video
def create_video(project_dir, image_paths, caption_subtitle_file, speech_subtitle_file,
                 audio_file, output_video, audio_durations, panel_start_times, panel_durations):
    """Render the final video: animated panels + audio, then burn subtitles.

    Pass 1 builds a temp video from the still images (per-panel fade
    in/out, durations from *panel_durations*) muxed with *audio_file*.
    Pass 2 burns the caption and speech ASS subtitle files on top.

    Returns *output_video* on success, ``None`` on failure.
    """
    try:
        # Build the per-panel animation filter chain.
        filter_parts = []
        concat_parts = []
        for i, (img, duration) in enumerate(zip(image_paths, panel_durations)):
            # Fades take ~1/6 of the panel duration each, at least 0.2 s.
            anim_duration = max(duration * 0.166, 0.2)
            fade_in = min(anim_duration, duration * 0.5)
            fade_out = min(anim_duration, duration - fade_in)
            filter_part = (
                f"[{i}:v]loop=loop=-1:size=1,trim=duration={duration}[base{i}];"
                f"[base{i}]format=yuva420p,"
                f"fade=in:st=0:d={fade_in}:alpha=1,"
                f"fade=out:st={duration-fade_out}:d={fade_out}:alpha=1[anim{i}];"
            )
            filter_parts.append(filter_part)
            concat_parts.append(f"[anim{i}]")
        concat_str = f"{''.join(concat_parts)}concat=n={len(image_paths)}:v=1:a=0[outv]"
        filter_complex = ''.join(filter_parts) + concat_str

        # Pass 1: animated video muxed with the audio track.
        temp_video = os.path.join(project_dir, "temp_video.mp4")
        cmd1 = ["ffmpeg", "-y"]
        for img in image_paths:
            cmd1.extend(["-i", img])
        cmd1.extend([
            "-i", audio_file,
            "-filter_complex", filter_complex,
            "-map", "[outv]",
            "-map", f"{len(image_paths)}:a",  # audio is the last input
            "-c:v", "libx264", "-pix_fmt", "yuv420p",
            "-c:a", "aac", "-strict", "experimental",
            "-vsync", "vfr",
            "-async", "1",
            "-movflags", "+faststart",
            temp_video
        ])
        # BUGFIX: capture stderr so the CalledProcessError handler below
        # can log it (previously e.stderr was always None).
        subprocess.run(cmd1, check=True, capture_output=True, text=True)

        # Pass 2: burn both subtitle tracks.
        combined_filter = (
            f"subtitles={process_sub_path(caption_subtitle_file)}",
            f"subtitles={process_sub_path(speech_subtitle_file)}"
        )
        filter_chain = ",".join(combined_filter)
        cmd_combined = [
            "ffmpeg", "-y",
            "-i", temp_video,
            "-vf", filter_chain,
            "-c:a", "copy",
            "-c:v", "libx264",
            "-preset", "fast",
            "-movflags", "+faststart",
            output_video
        ]
        subprocess.run(cmd_combined, check=True, capture_output=True, text=True)

        # Remove the intermediate file.
        os.remove(temp_video)
        return output_video
    except subprocess.CalledProcessError as e:
        logger.error(f"FFmpeg failed with cmd: {' '.join(e.cmd)}")
        logger.error(f"FFmpeg stderr: {e.stderr}")
        return None
    except Exception as e:
        logger.error(f"Unexpected error: {str(e)}")
        return None
# Use local storage as the publishing target
def upload_to_local_storage(local_path, relative_path):
    """Copy *local_path* into ``./storage/<relative_path>`` and return its URL.

    Returns the absolute URL (``BASE_URL`` + ``/storage/...``) on success,
    ``None`` on any error.
    """
    try:
        storage_dir = os.path.abspath("storage")
        target_path = os.path.join(storage_dir, relative_path)
        # Ensure both the storage root and the target subdirectory exist.
        os.makedirs(storage_dir, exist_ok=True)
        os.makedirs(os.path.dirname(target_path), exist_ok=True)
        shutil.copy2(local_path, target_path)
        # Build the public URL, normalising path separators for the web.
        relative_url = f"/storage/{relative_path.replace(os.sep, '/')}"
        return f"{BASE_URL}{relative_url}"
    except Exception as e:
        logger.error(f"Error copying to local storage: {e}")
        import traceback
        logger.error(traceback.format_exc())
        return None
@app.post("/api/generate-video")
async def generate_video(comic_data: ComicData):
    """End-to-end pipeline: panels -> audio -> subtitles -> video -> URL."""
    # Unique working directory per request.
    project_id = str(uuid.uuid4())
    project_dir = os.path.abspath(os.path.join("temp", project_id))
    os.makedirs(project_dir, exist_ok=True)
    logger.info(f"Created project directory: {project_dir}")
    try:
        # 1. Download the panel images (record widths for subtitle wrapping).
        image_paths = []
        image_widths = []
        for i, panel_url in enumerate(comic_data.panels):
            output_path = os.path.join(project_dir, f"panel_{i}.jpg")
            path_result, img_width = await download_image(panel_url, output_path)
            if path_result:
                image_paths.append(path_result)
                image_widths.append(img_width)
            else:
                image_widths.append(1920)  # fall back to a default width
        if not image_paths:
            raise HTTPException(status_code=500, detail="Failed to download images")
        logger.info(f"Downloaded {len(image_paths)} images")

        # 2. Generate and merge the audio track.
        audio_file, audio_durations, panel_start_times, panel_durations = await create_audio_file(
            project_dir, comic_data.captions, comic_data.speeches
        )
        if not audio_file:
            raise HTTPException(status_code=500, detail="Failed to create audio file")
        logger.info(f"Created audio file: {audio_file}")

        # 3. Create separate caption (bottom) and speech (top) subtitle files.
        caption_subtitle_file = create_caption_subtitle_file(
            project_dir, comic_data.captions, panel_start_times, panel_durations, image_widths
        )
        if not caption_subtitle_file:
            raise HTTPException(status_code=500, detail="Failed to create caption subtitle file")
        speech_subtitle_file = create_speech_subtitle_file(
            project_dir, comic_data.speeches, panel_start_times, panel_durations, image_widths
        )
        if not speech_subtitle_file:
            raise HTTPException(status_code=500, detail="Failed to create speech subtitle file")
        logger.info(f"Created subtitle files: {caption_subtitle_file}, {speech_subtitle_file}")

        # 4. Render the video.
        output_video = os.path.join(project_dir, "output.mp4")
        result = create_video(
            project_dir, image_paths, caption_subtitle_file, speech_subtitle_file,
            audio_file, output_video, audio_durations, panel_start_times, panel_durations
        )
        if not result:
            raise HTTPException(status_code=500, detail="Failed to create video")
        logger.info(f"Created video: {output_video}")

        # 5. Publish the video to local storage, then clean up.
        video_url = upload_to_local_storage(output_video, f"{project_id}/video.mp4")
        shutil.rmtree(project_dir, ignore_errors=True)
        return {
            "videoUrl": video_url,
            "projectId": project_id
        }
    except HTTPException:
        # BUGFIX: re-raise HTTPExceptions unchanged.  Previously they fell
        # through to the generic handler below and were re-wrapped as a
        # 500 whose detail read "500: <original detail>".
        shutil.rmtree(project_dir, ignore_errors=True)
        raise
    except Exception as e:
        # Clean up the workspace before reporting the failure.
        shutil.rmtree(project_dir, ignore_errors=True)
        logger.error(f"Error generating video: {e}")
        import traceback
        logger.error(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))
# Health-check endpoint
@app.get("/")
async def health_check():
    """Liveness probe: always reports the service as healthy."""
    return {"status": "ok"}
# Run a development server when executed directly.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)