import gradio as gr
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import librosa
import tempfile


def extract_waveform_animation(audio_file):
    # Load the audio file at its native sampling rate
    y, sr = librosa.load(audio_file, sr=None)
    duration = librosa.get_duration(y=y, sr=sr)

    # Create a figure and axis for the animation
    fig, ax = plt.subplots()
    line, = ax.plot([], [], lw=2)
    ax.set_xlim(0, duration)
    ax.set_ylim(np.min(y), np.max(y))

    # Initialize the animation with an empty line
    def init():
        line.set_data([], [])
        return line,

    # Reveal one more second of the waveform on each frame
    def update(frame):
        end = min((frame + 1) * sr, len(y))
        times = np.arange(end) / sr  # x-values in seconds for the samples drawn so far
        line.set_data(times, y[:end])
        return line,

    # Create the animation: one frame per second of audio; ceil so a trailing
    # partial second is still included
    ani = FuncAnimation(fig, update, frames=int(np.ceil(duration)),
                        init_func=init, blit=True)

    # Save the animation to a temporary MP4 file (requires ffmpeg on the PATH);
    # close the handle first so the writer can reopen the path on all platforms
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmpfile:
        video_path = tmpfile.name
    ani.save(video_path, writer="ffmpeg", fps=1)
    plt.close(fig)

    return video_path


# Define the Gradio interface
iface = gr.Interface(
    fn=extract_waveform_animation,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Video(),
    description="Upload an audio file to extract a video animation from its waveform.",
)

# Launch the app
if __name__ == "__main__":
    iface.launch()
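
# Optional quick check without launching the Gradio UI (illustrative only;
# "example.wav" is a placeholder path, not part of the app):
#
#     video_path = extract_waveform_animation("example.wav")
#     print("Animation written to:", video_path)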