import torch
from transformers import pipeline
import gradio as gr
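
# Whisper ASR pipeline: chunk_length_s=30 splits long recordings into
# 30-second windows so audio of any length can be transcribed.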
pipe = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small",
    chunk_length_s=30,
    device=0 if torch.cuda.is_available() else -1,  # GPU if available, else CPU
)


def transcribe_audio(audio_file):
    # batch_size=8 runs up to eight 30-second chunks per forward pass
    result = pipe(audio_file, batch_size=8)["text"]
    return result
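

# Gradio components: accept an uploaded audio file, show the transcript as text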
audio_input = gr.Audio(sources=["upload"], type="filepath")
output_text = gr.Textbox(label="Transcription")
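
# Wire the transcription function and the I/O components into a web UI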
iface = gr.Interface(
    fn=transcribe_audio,
    inputs=audio_input,
    outputs=output_text,
    title="Audio Transcription",
    description="A simple web app for audio transcription (English-only) using the Whisper model from OpenAI.",
)
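
# Start the local Gradio server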
iface.launch() |