# wav2vec-api / app.py
# NOTE(review): the three lines below are Hugging Face web-page residue
# captured with the file ("KuyaToto's picture / Update app.py / 8591444
# verified"); commented out so the module parses as Python.
import gradio as gr
import torch
import numpy as np
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
from scipy.signal import resample
# Load model and processor at import time (downloads/caches weights on
# first run, so startup can take a while).
# English CTC speech-recognition checkpoint: wav2vec2-large trained on
# 960h LibriSpeech with LV-60k self-training.
model_id = "facebook/wav2vec2-large-960h-lv60-self"
# Processor bundles the feature extractor (expects 16 kHz float audio)
# and the tokenizer used to decode CTC logits back to text.
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)
# Transcription function
def transcribe(audio_tuple):
    """Transcribe a recorded clip to lowercase English text.

    Args:
        audio_tuple: Payload from ``gr.Audio(type="numpy")`` — a
            ``(sample_rate, samples)`` tuple where ``samples`` is a 1-D
            (mono) or 2-D ``(n_samples, channels)`` NumPy array of
            int16 PCM or float values. May be ``None`` if nothing was
            recorded.

    Returns:
        The lowercase transcription string, or a warning message when
        the input is missing.
    """
    if audio_tuple is None:
        return "⚠️ No audio received."
    # BUG FIX: Gradio's numpy audio format is (sample_rate, data); the
    # original code unpacked the tuple in the opposite order, so every
    # downstream step operated on the wrong object.
    sample_rate, audio = audio_tuple
    if sample_rate is None or audio is None:
        return "⚠️ Audio or sample rate missing."
    # Downmix stereo (n_samples, channels) to mono (n_samples,).
    if audio.ndim == 2:
        audio = np.mean(audio, axis=1)
    # BUG FIX: actually normalize to [-1, 1]. Gradio records int16 PCM;
    # the original only cast to float32, leaving values in ±32767.
    if np.issubdtype(audio.dtype, np.integer):
        audio = audio.astype(np.float32) / np.iinfo(audio.dtype).max
    else:
        audio = audio.astype(np.float32)
    # Resample to the 16 kHz rate the Wav2Vec2 checkpoint expects.
    if sample_rate != 16000:
        target_length = round(len(audio) * 16000 / sample_rate)
        audio = resample(audio, target_length)
    # Feature-extract, then run greedy CTC decoding over the logits.
    input_values = processor(audio, sampling_rate=16000, return_tensors="pt").input_values
    with torch.no_grad():
        logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)[0]
    return transcription.lower()
# Gradio interface
# NOTE: the variable must be named `demo` — Hugging Face Spaces looks
# for this name when serving the app.
demo = gr.Interface(
    fn=transcribe,
    # type="numpy" delivers a (sample_rate, ndarray) tuple to `fn`.
    inputs=gr.Audio(sources=["microphone"], type="numpy", label="🎀 Speak a word or letter"),
    outputs=gr.Textbox(label="πŸ“ Transcription"),
    title="πŸ”€ Wav2Vec2 Speech Transcriber",
    description="Speak into the mic and get real-time transcription using Hugging Face Wav2Vec2."
)
demo.launch()