File size: 2,327 Bytes
9f57212
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67

import os
import tempfile
from flask import request, jsonify
from transformers import pipeline
import torch
import traceback

# Resolve a writable directory for the Hugging Face model cache.
# Honours the HF_HOME environment variable (set in the Dockerfile),
# falling back to a local .cache folder, and makes sure it exists.
cache_dir = os.getenv("HF_HOME", "./.cache")
os.makedirs(cache_dir, exist_ok=True)


print("Loading collabora/whisper-tiny-hindi model via transformers pipeline...")

# Pick the first CUDA device when torch can see a GPU; otherwise run on CPU.
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"

# Build the ASR pipeline straight from the Hub checkpoint.  The pipeline
# API is the supported way to load custom models from the Hub, and it
# caches the downloaded weights in cache_dir.
model = pipeline(
    "automatic-speech-recognition",
    model="collabora/whisper-tiny-hindi",
    device=device,
    model_kwargs={"cache_dir": cache_dir},
)

print("Whisper model loaded.")

def handle_transcribe():
    """Flask view: transcribe an uploaded audio file with the Whisper pipeline.

    Expects a multipart/form-data request with the audio under the ``audio``
    key (matching the frontend).  Returns JSON ``{'text': ...}`` on success,
    or ``{'error': ...}`` with status 400 (bad request) / 500 (internal error).
    """
    try:
        # Step 1: Validate request - looking for 'audio' key from frontend
        if 'audio' not in request.files:
            print("Error: 'audio' key not in request.files")
            return jsonify({'error': 'No audio file part in the request'}), 400

        file = request.files['audio']

        if file.filename == '':
            print("Error: No selected file")
            return jsonify({'error': 'No selected file'}), 400

        # Step 2: Persist the upload to a temporary file.  Using delete=False
        # and closing our handle first (instead of delete=True inside a
        # `with`) lets both Werkzeug and ffmpeg reopen the path by name —
        # delete=True keeps the handle open, which breaks on Windows — and
        # the finally clause guarantees cleanup even if transcription raises.
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".webm")
        temp_path = tmp.name
        tmp.close()
        try:
            file.save(temp_path)

            print(f"Transcribing file: {temp_path} with collabora/whisper-tiny-hindi pipeline")

            # Step 3: Transcribe using the pipeline.
            # The pipeline is robust and can handle various formats directly,
            # leveraging ffmpeg.
            result = model(temp_path)

            transcribed_text = result.get('text', '')

            print("Transcription successful.")
            return jsonify({'text': transcribed_text})
        finally:
            # Always remove the temp file, success or failure.
            os.remove(temp_path)

    except Exception as e:
        # Step 4: Robust error logging at the handler boundary; surface the
        # failure to the frontend as a structured 500 JSON response.
        print("❌ Error in handle_transcribe():")
        traceback.print_exc()
        return jsonify({'error': f"An unexpected error occurred during transcription: {str(e)}"}), 500