import tempfile

import gradio as gr
import librosa
import numpy as np
import spaces
import torch
import transformers


@spaces.GPU(duration=60)
def transcribe_and_respond(audio_file):
    try:
        # Build the pipeline inside the GPU-decorated function so the model
        # is loaded on the device allocated by Spaces.
        pipe = transformers.pipeline(
            model='sarvamai/shuka_v1',
            trust_remote_code=True,
            device=0,
            torch_dtype=torch.bfloat16
        )

        # Load the audio file at 16 kHz
        audio, sr = librosa.load(audio_file, sr=16000)

        # Print audio properties for debugging
        print(f"Audio dtype: {audio.dtype}, Audio shape: {audio.shape}, Sample rate: {sr}")

        turns = [
            {'role': 'system', 'content': 'Share the Key Takeaways and Action Steps'},
            {'role': 'user', 'content': '<|audio|>'}
        ]

        # Debug: print the initial turns
        print(f"Initial turns: {turns}")

        # Call the model with the audio and prompt
        output = pipe({'audio': audio, 'turns': turns, 'sampling_rate': sr}, max_new_tokens=10000)

        # Debug: print the final output from the model
        print(f"Model output: {output}")

        # Coerce the pipeline output to text for display
        transcript = str(output)

        # Write the transcript to a temporary file so it can be offered for download
        with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:
            f.write(transcript)
            transcript_path = f.name

        # Return one value per declared output: the transcript text and the file path
        return transcript, transcript_path
    except Exception as e:
        return f"Error: {str(e)}", None


iface = gr.Interface(
    fn=transcribe_and_respond,
    # Allow both file upload and microphone input by providing a list of sources.
    inputs=gr.Audio(sources=["upload", "microphone"], type="filepath"),
    outputs=[
        gr.Textbox(label="Transcript"),
        gr.File(label="Download Transcript")
    ],
    title="ShukaNotesApp",
    description=(
        "Meeting Notes for Indian Office Discussions.\n"
        "Upload or record your meeting audio and the model will share the key takeaways."
    ),
    live=True
)

if __name__ == "__main__":
    iface.launch()