#!/usr/bin/env python3 """ Voxtral ASR Fine-tuning Interface Features: - Collect a personal voice dataset (upload WAV/FLAC + transcripts or record mic audio) - Build a JSONL dataset ({audio_path, text}) at 16kHz - Fine-tune Voxtral (LoRA or full) with streamed logs - Push model to Hugging Face Hub - Deploy a Voxtral ASR demo Space Env tokens (optional): - HF_WRITE_TOKEN or HF_TOKEN: write access token - HF_READ_TOKEN: optional read token - HF_USERNAME: fallback username if not derivable from token """ from __future__ import annotations import os import json from pathlib import Path from datetime import datetime from typing import Any, Dict, Generator, Optional, Tuple import gradio as gr PROJECT_ROOT = Path(__file__).resolve().parent def get_python() -> str: import sys return sys.executable or "python" def get_username_from_token(token: str) -> Optional[str]: try: from huggingface_hub import HfApi # type: ignore api = HfApi(token=token) info = api.whoami() if isinstance(info, dict): return info.get("name") or info.get("username") if isinstance(info, str): return info except Exception: return None return None def run_command_stream(args: list[str], env: Dict[str, str], cwd: Optional[Path] = None) -> Generator[str, None, int]: import subprocess import shlex try: cmd_line = ' '.join(shlex.quote(a) for a in ([get_python()] + args)) yield f"$ {cmd_line}" process = subprocess.Popen( [get_python()] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, env=env, cwd=str(cwd or PROJECT_ROOT), bufsize=1, universal_newlines=True, ) if process.stdout is None: yield "❌ Error: Could not capture process output" return 1 for line in iter(process.stdout.readline, ""): if line.strip(): # Only yield non-empty lines yield line.rstrip() process.stdout.close() code = process.wait() if code != 0: yield f"❌ Command failed with exit code: {code}" else: yield f"✅ Command completed successfully (exit code: {code})" return code except FileNotFoundError as e: yield f"❌ Error: Python executable not found: {e}" return 1 except Exception as e: yield f"❌ Error running command: {str(e)}" return 1 def detect_nvidia_driver() -> Tuple[bool, str]: """Detect NVIDIA driver/GPU presence with multiple strategies. Returns (available, human_message). 
""" # 1) Try torch CUDA try: import torch # type: ignore if torch.cuda.is_available(): try: num = torch.cuda.device_count() names = [torch.cuda.get_device_name(i) for i in range(num)] return True, f"NVIDIA GPU detected: {', '.join(names)}" except Exception: return True, "NVIDIA GPU detected (torch.cuda available)" except Exception: pass # 2) Try NVML via pynvml try: import pynvml # type: ignore try: pynvml.nvmlInit() cnt = pynvml.nvmlDeviceGetCount() names = [] for i in range(cnt): h = pynvml.nvmlDeviceGetHandleByIndex(i) names.append(pynvml.nvmlDeviceGetName(h).decode("utf-8", errors="ignore")) drv = pynvml.nvmlSystemGetDriverVersion().decode("utf-8", errors="ignore") pynvml.nvmlShutdown() if cnt > 0: return True, f"NVIDIA driver {drv}; GPUs: {', '.join(names)}" except Exception: pass except Exception: pass # 3) Try nvidia-smi try: import subprocess res = subprocess.run(["nvidia-smi", "-L"], capture_output=True, text=True, timeout=3) if res.returncode == 0 and res.stdout.strip(): return True, res.stdout.strip().splitlines()[0] except Exception: pass return False, "No NVIDIA driver/GPU detected" def duplicate_space_hint() -> str: space_id = os.environ.get("SPACE_ID") or os.environ.get("HF_SPACE_ID") if space_id: space_url = f"https://huggingface.co/spaces/{space_id}" dup_url = f"{space_url}?duplicate=true" return ( f"ℹ️ No NVIDIA driver detected. If you're on Hugging Face Spaces, " f"please duplicate this Space to GPU hardware: [Duplicate this Space]({dup_url})." ) return ( "ℹ️ No NVIDIA driver detected. To enable training, run on a machine with an NVIDIA GPU/driver " "or duplicate this Space on Hugging Face with GPU hardware." ) def _write_jsonl(rows: list[dict], path: Path) -> Path: path.parent.mkdir(parents=True, exist_ok=True) with open(path, "w", encoding="utf-8") as f: for r in rows: f.write(json.dumps(r, ensure_ascii=False) + "\n") return path def _save_uploaded_dataset(files: list, transcripts: list[str]) -> str: dataset_dir = PROJECT_ROOT / "datasets" / "voxtral_user" dataset_dir.mkdir(parents=True, exist_ok=True) rows: list[dict] = [] for i, fpath in enumerate(files or []): if i >= len(transcripts): break rows.append({"audio_path": fpath, "text": transcripts[i] or ""}) jsonl_path = dataset_dir / "data.jsonl" _write_jsonl(rows, jsonl_path) return str(jsonl_path) def _push_dataset_to_hub(jsonl_path: str, repo_name: str, username: str = "") -> str: """Push dataset to Hugging Face Hub including audio files""" try: from huggingface_hub import HfApi, create_repo import json from pathlib import Path import os token = os.getenv("HF_TOKEN") or os.getenv("HF_WRITE_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN") if not token: return "❌ No HF_TOKEN found. Set HF_TOKEN environment variable to push datasets." 
def _push_dataset_to_hub(jsonl_path: str, repo_name: str, username: str = "") -> str:
    """Push dataset to Hugging Face Hub, including audio files."""
    try:
        from huggingface_hub import HfApi, create_repo

        token = os.getenv("HF_TOKEN") or os.getenv("HF_WRITE_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")
        if not token:
            return "❌ No HF_TOKEN found. Set HF_TOKEN environment variable to push datasets."

        api = HfApi(token=token)

        # Determine full repo name
        if "/" not in repo_name:
            if not username:
                user_info = api.whoami()
                username = user_info.get("name") or user_info.get("username") or ""
            if username:
                repo_name = f"{username}/{repo_name}"

        # Create dataset repository
        try:
            create_repo(repo_name, repo_type="dataset", token=token, exist_ok=True)
        except Exception as e:
            if "already exists" not in str(e).lower():
                return f"❌ Failed to create dataset repo: {e}"

        # Read the JSONL file
        jsonl_file = Path(jsonl_path)
        if not jsonl_file.exists():
            return f"❌ Dataset file not found: {jsonl_path}"

        # Read and process the JSONL to collect audio files and update paths
        audio_files = []
        updated_rows = []
        total_audio_size = 0
        with open(jsonl_file, "r", encoding="utf-8") as f:
            for line_num, line in enumerate(f):
                try:
                    row = json.loads(line.strip())
                    audio_path = row.get("audio_path", "")
                    if audio_path:
                        audio_file = Path(audio_path)
                        if audio_file.exists():
                            # Store the original file for upload
                            audio_files.append(audio_file)
                            total_audio_size += audio_file.stat().st_size
                            # Update path to be relative for the dataset
                            row["audio_path"] = f"audio/{audio_file.name}"
                        else:
                            print(f"⚠️ Warning: Audio file not found: {audio_path}")
                            row["audio_path"] = ""  # Clear missing files
                    updated_rows.append(row)
                except json.JSONDecodeError as e:
                    print(f"⚠️ Warning: Invalid JSON on line {line_num + 1}: {e}")
                    continue

        # Create updated JSONL with relative paths
        temp_jsonl_path = jsonl_file.parent / "temp_data.jsonl"
        with open(temp_jsonl_path, "w", encoding="utf-8") as f:
            for row in updated_rows:
                f.write(json.dumps(row, ensure_ascii=False) + "\n")

        # Upload the updated JSONL file
        api.upload_file(
            path_or_fileobj=str(temp_jsonl_path),
            path_in_repo="data.jsonl",
            repo_id=repo_name,
            repo_type="dataset",
            token=token,
        )

        # Clean up temp file
        temp_jsonl_path.unlink()

        # Upload audio files
        uploaded_count = 0
        for audio_file in audio_files:
            try:
                remote_path = f"audio/{audio_file.name}"
                api.upload_file(
                    path_or_fileobj=str(audio_file),
                    path_in_repo=remote_path,
                    repo_id=repo_name,
                    repo_type="dataset",
                    token=token,
                )
                uploaded_count += 1
                print(f"✅ Uploaded audio file: {audio_file.name}")
            except Exception as e:
                print(f"❌ Failed to upload {audio_file.name}: {e}")

        # Calculate total dataset size
        total_dataset_size = jsonl_file.stat().st_size + total_audio_size

        # Create README for the dataset
        readme_content = f"""---
dataset_info:
  features:
  - name: audio_path
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: {jsonl_file.stat().st_size}
    num_examples: {len(updated_rows)}
  download_size: {total_dataset_size}
  dataset_size: {total_dataset_size}
tags:
- voxtral
- asr
- speech-to-text
- fine-tuning
- audio-dataset
---

# Voxtral ASR Dataset

This dataset was created using the Voxtral ASR Fine-tuning Interface.

## Dataset Structure

- **audio_path**: Relative path to the audio file (stored in `audio/` directory)
- **text**: Transcription of the audio

## Dataset Statistics

- **Number of examples**: {len(updated_rows)}
- **Audio files uploaded**: {uploaded_count}
- **Total dataset size**: {total_dataset_size:,} bytes

## Usage

```python
from datasets import load_dataset, Audio

# Load dataset
dataset = load_dataset("{repo_name}")

# Load audio data
dataset = dataset.cast_column("audio_path", Audio())

# Access first example
print(dataset["train"][0]["text"])
print(dataset["train"][0]["audio_path"])
```

## Loading with Audio Decoding

```python
from datasets import load_dataset, Audio

# Load with automatic audio decoding
dataset = load_dataset("{repo_name}")
dataset = dataset.cast_column("audio_path", Audio(sampling_rate=16000))

# The audio column will contain the decoded audio arrays
audio_array = dataset["train"][0]["audio_path"]["array"]
sampling_rate = dataset["train"][0]["audio_path"]["sampling_rate"]
```
"""

        # Upload README
        readme_path = jsonl_file.parent / "README.md"
        with open(readme_path, "w", encoding="utf-8") as f:
            f.write(readme_content)
        api.upload_file(
            path_or_fileobj=str(readme_path),
            path_in_repo="README.md",
            repo_id=repo_name,
            repo_type="dataset",
            token=token,
        )
        readme_path.unlink()  # Clean up temp file

        return (
            f"✅ Dataset pushed to: https://huggingface.co/datasets/{repo_name}\n"
            f"📊 Uploaded {len(updated_rows)} examples and {uploaded_count} audio files"
        )
    except Exception as e:
        return f"❌ Failed to push dataset: {e}"
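
# Optional verification sketch (illustrative): after a successful push, the repo
# contents can be listed with huggingface_hub's list_repo_files. The repo id
# below is a placeholder, not a real repository.
#
#   from huggingface_hub import HfApi
#   files = HfApi().list_repo_files("user/voxtral-dataset", repo_type="dataset")
#   print(files)  # expect data.jsonl, README.md, and audio/* entries
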
def _save_recordings(recordings: list[tuple[int, list]], transcripts: list[str]) -> str:
    import soundfile as sf

    dataset_dir = PROJECT_ROOT / "datasets" / "voxtral_user"
    wav_dir = dataset_dir / "wavs"
    wav_dir.mkdir(parents=True, exist_ok=True)
    rows: list[dict] = []
    for i, rec in enumerate(recordings or []):
        if rec is None:
            continue
        if i >= len(transcripts):
            break
        sr, data = rec
        out_path = wav_dir / f"rec_{i:04d}.wav"
        sf.write(str(out_path), data, sr)
        rows.append({"audio_path": str(out_path), "text": transcripts[i] or ""})
    jsonl_path = dataset_dir / "data.jsonl"
    _write_jsonl(rows, jsonl_path)
    return str(jsonl_path)
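
# Note: sf.write above stores audio at the microphone's native sample rate. If a
# strict 16 kHz dataset is wanted (as the module docstring suggests), one option
# is to resample before writing — a minimal sketch assuming librosa is available:
#
#   import numpy as np, librosa
#   data_16k = librosa.resample(np.asarray(data, dtype="float32"), orig_sr=sr, target_sr=16000)
#   sf.write(str(out_path), data_16k, 16000)
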
def start_voxtral_training(
    use_lora: bool,
    base_model: str,
    repo_short: str,
    jsonl_path: str,
    train_count: int,
    eval_count: int,
    batch_size: int,
    grad_accum: int,
    learning_rate: float,
    epochs: float,
    lora_r: int,
    lora_alpha: int,
    lora_dropout: float,
    freeze_audio_tower: bool,
    push_to_hub: bool,
    deploy_demo: bool,
) -> str:
    """Start Voxtral training and return collected logs as a string."""
    env = os.environ.copy()
    write_token = env.get("HF_WRITE_TOKEN") or env.get("HF_TOKEN")
    read_token = env.get("HF_READ_TOKEN")
    username = get_username_from_token(write_token or "") or env.get("HF_USERNAME") or ""
    output_dir = PROJECT_ROOT / "outputs" / repo_short

    # Collect all logs
    all_logs = []

    def collect_logs(generator):
        """Helper to collect logs from a generator."""
        for line in generator:
            all_logs.append(line)
            print(line)  # Also print to console for debugging

    try:
        # 1) Train
        script = PROJECT_ROOT / ("scripts/train_lora.py" if use_lora else "scripts/train.py")
        args = [str(script)]
        if jsonl_path:
            args += ["--dataset-jsonl", jsonl_path]
        args += [
            "--model-checkpoint", base_model,
            "--train-count", str(train_count),
            "--eval-count", str(eval_count),
            "--batch-size", str(batch_size),
            "--grad-accum", str(grad_accum),
            "--learning-rate", str(learning_rate),
            "--epochs", str(epochs),
            "--output-dir", str(output_dir),
            "--save-steps", "50",
        ]
        if use_lora:
            args += [
                "--lora-r", str(lora_r),
                "--lora-alpha", str(lora_alpha),
                "--lora-dropout", str(lora_dropout),
            ]
        if freeze_audio_tower:
            args += ["--freeze-audio-tower"]

        all_logs.append("🚀 Starting Voxtral training...")
        collect_logs(run_command_stream(args, env))
        all_logs.append("✅ Training completed!")

        # 2) Push to Hub
        if push_to_hub:
            if not username:
                all_logs.append("❌ Cannot push to Hub: No username available. Set HF_TOKEN or HF_USERNAME.")
            else:
                repo_name = f"{username}/{repo_short}"
                push_args = [
                    str(PROJECT_ROOT / "scripts/push_to_huggingface.py"),
                    "model",
                    str(output_dir),
                    repo_name,
                ]
                all_logs.append(f"📤 Pushing model to Hugging Face Hub: {repo_name}")
                collect_logs(run_command_stream(push_args, env))
                all_logs.append("✅ Model pushed successfully!")

        # 3) Deploy demo Space
        if deploy_demo and username:
            deploy_args = [
                str(PROJECT_ROOT / "scripts/deploy_demo_space.py"),
                "--hf-token", write_token or "",
                "--hf-username", username,
                "--model-id", f"{username}/{repo_short}",
                "--demo-type", "voxtral",
                "--space-name", f"{repo_short}-demo",
            ]
            all_logs.append("🚀 Deploying demo Space...")
            collect_logs(run_command_stream(deploy_args, env))
            all_logs.append("✅ Demo Space deployed!")

        # Return all collected logs as a single string
        return "\n".join(all_logs)
    except Exception as e:
        error_msg = f"❌ Error during training: {str(e)}"
        all_logs.append(error_msg)
        print(error_msg)  # Also print to console
        import traceback

        traceback.print_exc()
        return "\n".join(all_logs)
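
# For reference, a LoRA run assembled above resolves to a subprocess call along
# these lines (every value here is illustrative, including the model id):
#
#   python scripts/train_lora.py --dataset-jsonl datasets/voxtral_user/data.jsonl \
#       --model-checkpoint mistralai/Voxtral-Mini-3B-2507 --train-count 100 --eval-count 10 \
#       --batch-size 2 --grad-accum 4 --learning-rate 5e-5 --epochs 3 \
#       --output-dir outputs/my-voxtral --save-steps 50 \
#       --lora-r 8 --lora-alpha 16 --lora-dropout 0.05
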
def load_multilingual_phrases(language="en", max_phrases=None, split="train"):
    """Load recording prompts from the CohereLabs AYA Collection dataset.

    Streams text from the language-split AYA Collection, which covers all
    languages supported by Voxtral Mini, and falls back to a small built-in
    phrase list when the dataset (or an HF token) is unavailable.

    Args:
        language: Language code (e.g., 'en', 'de', 'fr', etc.)
        max_phrases: Maximum number of phrases to load (None for default 1000)
        split: Dataset split to use ('train', 'validation', 'test')

    Returns:
        List of transcription phrases
    """
    from datasets import load_dataset
    import random

    # Default to 1000 phrases if not specified
    if max_phrases is None:
        max_phrases = 1000

    # Language code mapping for the CohereLabs AYA Collection dataset.
    # All Voxtral Mini supported languages are available in the AYA Collection.
    aya_supported_langs = {
        "en": "english",
        "fr": "french",
        "de": "german",
        "es": "spanish",
        "it": "italian",
        "pt": "portuguese",
        "nl": "dutch",
        "hi": "hindi",
    }

    # Map input language to CohereLabs AYA Collection configuration
    aya_lang = aya_supported_langs.get(language)
    if not aya_lang:
        raise Exception(f"Language {language} not supported in CohereLabs AYA Collection dataset")

    try:
        print(f"Loading phrases from CohereLabs AYA Collection dataset for language: {language}")

        # Check for authentication token
        token = os.getenv("HF_TOKEN") or os.getenv("HF_WRITE_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")

        # Try to load the CohereLabs AYA Collection dataset for the specified language
        if token:
            try:
                ds = load_dataset(
                    "CohereLabs/aya_collection_language_split",
                    aya_lang,
                    split="train",
                    streaming=True,
                    token=token,
                )
                print(f"Successfully loaded CohereLabs AYA Collection {language} dataset")
            except Exception as e:
                print(f"CohereLabs AYA Collection {language} not available ({e}), trying alternative datasets")
                raise Exception("AYA Collection not available")
        else:
            print("No HF_TOKEN found for CohereLabs AYA Collection dataset")
            raise Exception("No token available")

        # Sample phrases from the streaming dataset
        phrases = []
        count = 0
        seen_phrases = set()
        for example in ds:
            if count >= max_phrases:
                break
            # AYA Collection format: combine the 'inputs' and 'targets' fields
            inputs_text = example.get("inputs", "").strip()
            targets_text = example.get("targets", "").strip()
            text = f"{inputs_text} {targets_text}".strip()

            # Filter for quality phrases
            if (
                text
                and len(text) > 10  # Minimum length
                and len(text) < 200  # Maximum length to avoid very long utterances
                and text not in seen_phrases  # Avoid duplicates
                and not text.isdigit()  # Avoid pure numbers
                and not all(c in "0123456789., " for c in text)  # Avoid mostly numeric
            ):
                phrases.append(text)
                seen_phrases.add(text)
                count += 1

        if phrases:
            # Shuffle the phrases for variety
            random.shuffle(phrases)
            dataset_name = "CohereLabs AYA Collection"
            print(f"Successfully loaded {len(phrases)} phrases from {dataset_name} dataset for {language}")
            return phrases
        else:
            print(f"No suitable phrases found in dataset for {language}")
            raise Exception("No phrases found")

    except Exception as e:
        error_msg = str(e).lower()
        if "401" in error_msg or "unauthorized" in error_msg:
            print(f"CohereLabs AYA Collection authentication failed for {language}: {e}")
            print("This dataset requires a Hugging Face token. Please set HF_TOKEN environment variable.")
        else:
            print(f"CohereLabs AYA Collection loading failed for {language}: {e}")

        # Fall back to built-in phrases if dataset loading fails
        print("Using fallback phrases")

        # Language-specific fallback phrases
        language_fallbacks = {
            "hi": [
                "नमस्ते, आज आप कैसे हैं?", "मेरा नाम राजेश कुमार है।", "आज का मौसम बहुत अच्छा है।",
                "मैं हिंदी में बात करना चाहता हूं।", "कृपया धीरे और स्पष्ट बोलें।", "यह एक परीक्षण वाक्य है।",
                "मैं पुस्तकें पढ़ना पसंद करता हूं।", "क्या आप मेरी मदद कर सकते हैं?", "आपका फोन नंबर क्या है?",
                "मैं कल सुबह आऊंगा।", "धन्यवाद, आपका समय देने के लिए।", "यह जगह बहुत सुंदर है।",
                "मैं भोजन तैयार करना सीख रहा हूं।", "क्या यह रास्ता सही है?", "मैं स्कूल जाना चाहता हूं।",
                "आपकी उम्र क्या है?", "यह कितने का है?", "मैं थक गया हूं।",
                "आप कहां से हैं?", "चलिए पार्क में टहलते हैं।",
            ],
            "en": [
                "Hello, how are you today?", "My name is John Smith.", "The weather is very nice today.",
                "I want to speak in English.", "Please speak slowly and clearly.", "This is a test sentence.",
                "I enjoy reading books.", "Can you help me?", "What is your phone number?",
                "I will come tomorrow morning.", "Thank you for your time.", "This place is very beautiful.",
                "I am learning to cook food.", "Is this the right way?", "I want to go to school.",
                "How old are you?", "How much does this cost?", "I am tired.",
                "Where are you from?", "Let's go for a walk in the park.",
            ],
            "fr": [
                "Bonjour, comment allez-vous aujourd'hui?", "Je m'appelle Jean Dupont.", "Le temps est très beau aujourd'hui.",
                "Je veux parler en français.", "Parlez lentement et clairement s'il vous plaît.", "Ceci est une phrase de test.",
                "J'aime lire des livres.", "Pouvez-vous m'aider?", "Quel est votre numéro de téléphone?",
                "Je viendrai demain matin.", "Merci pour votre temps.", "Cet endroit est très beau.",
                "J'apprends à cuisiner.", "Est-ce le bon chemin?", "Je veux aller à l'école.",
                "Quel âge avez-vous?", "Combien cela coûte-t-il?", "Je suis fatigué.",
                "D'où venez-vous?", "Allons nous promener dans le parc.",
            ],
], "de": [ "Hallo, wie geht es Ihnen heute?", "Mein Name ist Hans Müller.", "Das Wetter ist heute sehr schön.", "Ich möchte auf Deutsch sprechen.", "Sprechen Sie bitte langsam und deutlich.", "Dies ist ein Testsatz.", "Ich lese gerne Bücher.", "Können Sie mir helfen?", "Wie ist Ihre Telefonnummer?", "Ich komme morgen früh.", "Vielen Dank für Ihre Zeit.", "Dieser Ort ist sehr schön.", "Ich lerne kochen.", "Ist das der richtige Weg?", "Ich möchte zur Schule gehen.", "Wie alt sind Sie?", "Wie viel kostet das?", "Ich bin müde.", "Woher kommen Sie?", "Lassen Sie uns im Park spazieren gehen." ], "es": [ "Hola, ¿cómo estás hoy?", "Me llamo Juan García.", "El tiempo está muy bueno hoy.", "Quiero hablar en español.", "Por favor habla despacio y claro.", "Esta es una oración de prueba.", "Me gusta leer libros.", "¿Puedes ayudarme?", "¿Cuál es tu número de teléfono?", "Vendré mañana por la mañana.", "Gracias por tu tiempo.", "Este lugar es muy bonito.", "Estoy aprendiendo a cocinar.", "¿Es este el camino correcto?", "Quiero ir a la escuela.", "¿Cuántos años tienes?", "¿Cuánto cuesta esto?", "Estoy cansado.", "¿De dónde eres?", "Vamos a caminar por el parque." ], "it": [ "Ciao, come stai oggi?", "Mi chiamo Mario Rossi.", "Il tempo è molto bello oggi.", "Voglio parlare in italiano.", "Per favore parla lentamente e chiaramente.", "Questa è una frase di prova.", "Mi piace leggere libri.", "Puoi aiutarmi?", "Qual è il tuo numero di telefono?", "Verrò domani mattina.", "Grazie per il tuo tempo.", "Questo posto è molto bello.", "Sto imparando a cucinare.", "È questa la strada giusta?", "Voglio andare a scuola.", "Quanti anni hai?", "Quanto costa questo?", "Sono stanco.", "Da dove vieni?", "Andiamo a fare una passeggiata nel parco." ], "pt": [ "Olá, como você está hoje?", "Meu nome é João Silva.", "O tempo está muito bom hoje.", "Quero falar em português.", "Por favor fale devagar e claramente.", "Esta é uma frase de teste.", "Eu gosto de ler livros.", "Você pode me ajudar?", "Qual é o seu número de telefone?", "Vou vir amanhã de manhã.", "Obrigado pelo seu tempo.", "Este lugar é muito bonito.", "Estou aprendendo a cozinhar.", "Este é o caminho certo?", "Quero ir para a escola.", "Quantos anos você tem?", "Quanto custa isso?", "Estou cansado.", "De onde você é?", "Vamos dar um passeio no parque." ], "nl": [ "Hallo, hoe gaat het vandaag met je?", "Mijn naam is Jan de Vries.", "Het weer is vandaag erg mooi.", "Ik wil in het Nederlands spreken.", "Spreek langzaam en duidelijk alstublieft.", "Dit is een testzin.", "Ik houd van het lezen van boeken.", "Kun je me helpen?", "Wat is je telefoonnummer?", "Ik kom morgenochtend.", "Bedankt voor je tijd.", "Deze plek is erg mooi.", "Ik leer koken.", "Is dit de juiste weg?", "Ik wil naar school gaan.", "Hoe oud ben je?", "Hoeveel kost dit?", "Ik ben moe.", "Waar kom je vandaan?", "Laten we een wandeling maken in het park." ] } fallback_phrases = language_fallbacks.get(language, language_fallbacks["en"]) if max_phrases: fallback_phrases = random.sample(fallback_phrases, min(max_phrases, len(fallback_phrases))) else: random.shuffle(fallback_phrases) return fallback_phrases # Initialize phrases dynamically DEFAULT_LANGUAGE = "en" # Default to English ALL_PHRASES = load_multilingual_phrases(DEFAULT_LANGUAGE, max_phrases=None) with gr.Blocks(title="Voxtral ASR Fine-tuning") as demo: has_gpu, gpu_msg = detect_nvidia_driver() if has_gpu: gr.HTML( f"""
with gr.Blocks(title="Voxtral ASR Fine-tuning") as demo:
    has_gpu, gpu_msg = detect_nvidia_driver()
    if has_gpu:
        gr.HTML(
            f"""
            <div style="padding:8px 12px;border-radius:8px;background:#e8f5e9;">
                ✅ NVIDIA GPU ready — {gpu_msg}<br>
                Set <code>HF_WRITE_TOKEN</code>/<code>HF_TOKEN</code> in environment to enable Hub push.
            </div>
            """
        )
    else:
        hint_md = duplicate_space_hint()
        gr.HTML(
            """
            <div style="padding:8px 12px;border-radius:8px;background:#fff3e0;">
                ⚠️ No NVIDIA GPU/driver detected — training requires a GPU runtime
            </div>
            """
        )
        gr.Markdown(hint_md)

    if not (os.getenv("HF_TOKEN") or os.getenv("HF_WRITE_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")):
        gr.HTML(
            """
            <div style="padding:8px 12px;border-radius:8px;background:#fff3e0;">
                ⚠️ No HF_TOKEN detected<br>
                Set the HF_TOKEN environment variable to access the CohereLabs AYA Collection
                dataset, which provides high-quality text in 100+ languages, including all
                Voxtral Mini supported languages. Currently using fallback phrases for demonstration.
            </div>
            """
        )