import os
import tempfile
import traceback

import torch
from flask import request, jsonify
from transformers import pipeline

# Define a writable directory for the model cache.
# This respects the HF_HOME environment variable set in the Dockerfile.
cache_dir = os.environ.get("HF_HOME", "/tmp/.cache")
os.makedirs(cache_dir, exist_ok=True)

print("Loading openai/whisper-tiny model via transformers pipeline...")

# Determine device: GPU when available, otherwise CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Initialize the ASR pipeline with the lightweight model.
model = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-tiny",
    device=device,
    model_kwargs={"cache_dir": cache_dir},
)
print("Whisper model loaded.")


def handle_transcribe():
    """Flask view: transcribe an uploaded Hindi audio clip with Whisper.

    Expects a multipart/form-data request carrying the audio under the
    'audio' key (as sent by the frontend).

    Returns:
        - 200 with JSON {'text': <transcription>} on success
        - 400 with JSON {'error': ...} when the upload is missing/empty
        - 500 with JSON {'error': ...} on any unexpected failure
    """
    try:
        # Step 1: Validate request - looking for 'audio' key from frontend
        if 'audio' not in request.files:
            print("Error: 'audio' key not in request.files")
            return jsonify({'error': 'No audio file part in the request'}), 400

        file = request.files['audio']
        if file.filename == '':
            print("Error: No selected file")
            return jsonify({'error': 'No selected file'}), 400

        # Step 2: Save the upload to a temporary path.
        # NOTE: mkstemp + explicit os.remove is used instead of
        # NamedTemporaryFile(delete=True) because the original pattern
        # reopens the still-open file by name (file.save / the pipeline),
        # which fails on Windows and is fragile in general. The finally
        # block guarantees cleanup even if transcription raises.
        fd, temp_path = tempfile.mkstemp(suffix=".webm")
        os.close(fd)  # only the path is needed; file.save() writes it itself
        try:
            file.save(temp_path)
            print(f"Transcribing file: {temp_path} with openai/whisper-tiny pipeline for Hindi.")

            # Step 3: Transcribe using the pipeline with language-specific
            # configuration. This tells Whisper to process the audio as Hindi.
            result = model(
                temp_path,
                generate_kwargs={"language": "hindi", "task": "transcribe"}
            )
            transcribed_text = result.get('text', '')
            print("Transcription successful.")
            return jsonify({'text': transcribed_text})
        finally:
            # Always remove the temp file, success or failure.
            os.remove(temp_path)

    except Exception as e:
        # Step 4: Robust error logging at the route boundary — log the
        # full traceback server-side, return a generic JSON 500 to the client.
        print("❌ Error in handle_transcribe():")
        traceback.print_exc()
        return jsonify({'error': f"An unexpected error occurred during transcription: {str(e)}"}), 500