# project1/app.py — Gradio video transcription + summarization app
# (Hugging Face Space source; commit 1487f7e, "Update app.py", 1.67 kB)
import gradio as gr
import os
from moviepy.editor import VideoFileClip
from transformers import pipeline
# Load the ASR (speech-to-text) pipeline once at import time so the model
# is downloaded/initialized before the first request is served.
asr = pipeline(task="automatic-speech-recognition", model="distil-whisper/distil-small.en")
# Load the summarization pipeline (BART fine-tuned on CNN/DailyMail).
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
def transcribe_and_summarize(video_file):
    """Transcribe the speech in a video and summarize the transcript.

    Parameters
    ----------
    video_file : str | None
        Filesystem path to the uploaded video (Gradio ``type="filepath"``
        value), or ``None`` when no file was provided.

    Returns
    -------
    tuple[str, str]
        ``(transcribed_text, summarized_text)``. On failure the first
        element is an ``"Error: ..."`` message and the second is ``""``.
    """
    if video_file is None:
        return "Error: No file provided.", ""
    audio_path = "temp_audio.wav"
    video = None
    try:
        video = VideoFileClip(video_file)
        # Videos with no audio track have `video.audio is None`; the old
        # code crashed here with an opaque AttributeError.
        if video.audio is None:
            return "Error: The video has no audio track.", ""
        video.audio.write_audiofile(audio_path, codec='pcm_s16le')
        # Transcribe the extracted audio; timestamps force chunked output,
        # which lets distil-whisper handle audio longer than 30 s.
        transcription_result = asr(audio_path, return_timestamps=True)
        transcribed_text = " ".join(
            segment['text'] for segment in transcription_result['chunks']
        )
        # Summarizing very short transcripts yields degenerate output, so
        # skip the summarizer below a 50-word threshold.
        if len(transcribed_text.split()) < 50:
            summarized_text = "Text too short to summarize."
        else:
            summary_result = summarizer(
                transcribed_text, max_length=100, min_length=30, do_sample=False
            )
            summarized_text = summary_result[0]['summary_text']
        return transcribed_text, summarized_text
    except Exception as e:
        # Surface the failure to the UI rather than crashing the worker.
        return f"Error: {str(e)}", ""
    finally:
        # Release the clip's ffmpeg readers and delete the temp WAV so
        # repeated calls don't leak file handles or disk space.
        if video is not None:
            video.close()
        if os.path.exists(audio_path):
            os.remove(audio_path)
# Wire the pipeline function into a simple two-output Gradio UI.
# NOTE(review): `type="filepath"` makes Gradio pass the upload to the
# handler as a path string; the `type` kwarg on gr.Video exists in
# Gradio 3.x — confirm against the installed Gradio version.
iface = gr.Interface(
fn=transcribe_and_summarize,
inputs=gr.Video(type="filepath", label="Upload Video (.mp4)"),
outputs=[
gr.Textbox(label="Transcribed Text"),
gr.Textbox(label="Summarized Text")
]
)
# Launch app on the port from the PORT1 env var, defaulting to Gradio's
# standard 7860. share=True requests a public tunnel link as well.
port = int(os.environ.get('PORT1', 7860))
iface.launch(share=True, server_port=port)