from youtube_transcript_api import YouTubeTranscriptApi, TranscriptsDisabled, NoTranscriptFound, VideoUnavailable
from urllib.parse import urlparse, parse_qs

import gradio as gr
from transformers import pipeline

# Load the Hugging Face summarization model (DistilBART fine-tuned on XSum)
text_summary = pipeline("summarization", model="sshleifer/distilbart-xsum-12-6")
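# Note: BART-family models such as distilbart-xsum-12-6 are limited to roughly
# 1024 input tokens, which is why long transcripts are chunked (~500 words at
# a time) before summarization further below.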

# Extract video ID from YouTube URL
def get_video_id(youtube_url):
    query = urlparse(youtube_url)
    if query.hostname == 'youtu.be':
        return query.path[1:]
    elif query.hostname in ['www.youtube.com', 'youtube.com']:
        if query.path == '/watch':
            return parse_qs(query.query).get('v', [None])[0]
        elif query.path.startswith('/embed/') or query.path.startswith('/v/'):
            return query.path.split('/')[2]
    return None
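
# Illustrative calls (hypothetical video IDs), covering the URL shapes handled above:
#   get_video_id("https://youtu.be/abc123XYZ")                -> "abc123XYZ"
#   get_video_id("https://www.youtube.com/watch?v=abc123XYZ") -> "abc123XYZ"
#   get_video_id("https://www.youtube.com/embed/abc123XYZ")   -> "abc123XYZ"
#   get_video_id("https://vimeo.com/12345")                   -> None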

# Fetch the transcript text for a YouTube URL
def fetch_transcript(video_url):
    video_id = get_video_id(video_url)
    if not video_id:
        return "❌ Invalid YouTube URL."
    try:
        # Static API of youtube-transcript-api versions before 1.0
        transcript = YouTubeTranscriptApi.get_transcript(video_id)
        return " ".join([entry['text'] for entry in transcript])
    except (NoTranscriptFound, TranscriptsDisabled, VideoUnavailable) as e:
        return f"❌ {str(e)}"
    except Exception:
        # Fallback: list the available transcripts and fetch the English one
        try:
            transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
            transcript = transcript_list.find_transcript(['en'])
            return " ".join([entry['text'] for entry in transcript.fetch()])
        except Exception as e2:
            return f"❌ Error fetching transcript: {str(e2)}"
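
# Assumed transcript shape (youtube-transcript-api < 1.0): each entry is a dict
# like {'text': '...', 'start': 12.3, 'duration': 4.5}; only 'text' is kept
# when the segments are joined above.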

# Split long text into chunks of at most max_words words each
def split_text(text, max_words=500):
    words = text.split()
    chunks = []
    for i in range(0, len(words), max_words):
        chunk = " ".join(words[i:i + max_words])
        chunks.append(chunk)
    return chunks
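
# Chunking sketch (illustrative numbers): a 1,200-word transcript with
# max_words=500 yields three chunks of 500, 500, and 200 words.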

# Main function: fetch + summarize a transcript of any length
def summarize_youtube_video(url):
    transcript = fetch_transcript(url)
    if transcript.startswith("❌"):
        return transcript
    try:
        words = transcript.split()
        word_count = len(words)
        # Short transcripts fit in a single model call
        if word_count <= 500:
            summary = text_summary(transcript, max_length=150, min_length=60, do_sample=False)
            return summary[0]['summary_text']
        # Long transcripts: summarize each chunk separately
        chunks = split_text(transcript, max_words=500)
        partial_summaries = []
        for chunk in chunks:
            summary = text_summary(chunk, max_length=150, min_length=60, do_sample=False)
            partial_summaries.append(summary[0]['summary_text'])
        combined_summary = " ".join(partial_summaries)
        # Final summary of all partial summaries
        final_summary = text_summary(combined_summary, max_length=200, min_length=80, do_sample=False)
        return final_summary[0]['summary_text']
    except Exception as e:
        return f"❌ Error during summarization: {str(e)}"
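
# This is a simple map-reduce summarization: each chunk is summarized on its
# own ("map"), then the concatenated partial summaries are summarized once more
# ("reduce"). If the combined partial summaries themselves exceeded the model's
# input limit, they would need another chunking pass; that case is not handled here.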

# Gradio UI
gr.close_all()  # Close any Gradio apps still running from previous launches
demo = gr.Interface(
    fn=summarize_youtube_video,
    inputs=gr.Textbox(label="Enter YouTube Video URL", lines=1, placeholder="https://youtu.be/..."),
    outputs=gr.Textbox(label="Video Summary", lines=10),
    title="@RosangenAi Project 2: YouTube Video Summarizer",
    description="Paste any YouTube video link. This app will fetch and summarize even long transcripts using Hugging Face models."
)
demo.launch()
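
# Optional: outside Hugging Face Spaces, demo.launch(share=True) serves a
# temporary public link (a standard Gradio option; not needed on Spaces).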