# HeAR (Health Acoustic Representations) PyTorch demo — Hugging Face Space.
# Loads google/hear-pytorch and exposes a Gradio UI that returns the
# embedding shape for an uploaded audio file.
import gradio as gr
from transformers import AutoProcessor, AutoModel
import torch
import torchaudio
import os
# Hugging Face access token; the Space must define the "hear2" secret,
# otherwise this is None and the (gated) model download will fail.
HF_TOKEN = os.getenv("hear2")

# `token=` replaces the deprecated `use_auth_token=` kwarg (removed in
# transformers v5); behavior is otherwise identical.
processor = AutoProcessor.from_pretrained("google/hear-pytorch", token=HF_TOKEN)
model = AutoModel.from_pretrained("google/hear-pytorch", token=HF_TOKEN)
def predict(audio):
    """Return the shape of the HeAR embeddings for an uploaded audio file.

    Args:
        audio: Filesystem path to the uploaded audio clip, as delivered by
            ``gr.Audio(type="filepath")`` — or ``None`` when the user
            submits without recording/uploading anything.

    Returns:
        A human-readable string describing the embedding tensor shape,
        or an error message when no audio was provided.
    """
    # gr.Audio hands us None on an empty submission; torchaudio.load(None)
    # would otherwise crash with an opaque traceback.
    if audio is None:
        return "Please upload an audio file."
    # waveform is (channels, samples); NOTE(review): the processor is fed the
    # multi-channel tensor as-is — confirm it accepts this layout and handles
    # resampling, since sample_rate is passed through unmodified.
    waveform, sample_rate = torchaudio.load(audio)
    inputs = processor(waveform, sampling_rate=sample_rate, return_tensors="pt")
    # Inference only — no_grad avoids building the autograd graph.
    with torch.no_grad():
        embeddings = model(**inputs).last_hidden_state
    return f"Embedding shape: {embeddings.shape}"
# Gradio UI: one audio-file input wired to predict(), plain-text output.
_ui_kwargs = dict(
    fn=predict,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="HeAR PyTorch Demo",
    description="Upload an audio file to generate health acoustic embeddings.",
)
iface = gr.Interface(**_ui_kwargs)

# Launch only when executed as a script, not on import.
if __name__ == "__main__":
    iface.launch()