import streamlit as st
from transformers import MarianTokenizer, MarianMTModel

# Supported languages mapped to MarianMT (Helsinki-NLP/opus-mt-*) codes.
# Defined before the functions that read it, for readability.
LANGUAGES = {
    "English": "en",
    "French": "fr",
    "Spanish": "es",
    "German": "de",
    "Chinese": "zh",
    "Arabic": "ar",
    "Russian": "ru",
    "Hindi": "hi",
    "Japanese": "ja",
}


@st.cache_resource
def _load_default_model():
    """Load and cache the default MarianMT model/tokenizer (en -> fr)."""
    model_name = "Helsinki-NLP/opus-mt-en-fr"
    tokenizer = MarianTokenizer.from_pretrained(model_name)
    model = MarianMTModel.from_pretrained(model_name)
    return tokenizer, model


@st.cache_resource
def load_model(src_lang, tgt_lang):
    """Load and cache the MarianMT tokenizer/model for a language pair.

    Falls back to the preloaded en-fr model when no model is published for
    the requested pair (not every src/tgt combination exists on the Hub).

    Args:
        src_lang: MarianMT source-language code (e.g. "en").
        tgt_lang: MarianMT target-language code (e.g. "fr").

    Returns:
        A ``(tokenizer, model)`` tuple.
    """
    model_name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
    try:
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        return tokenizer, model
    except Exception:  # deliberate best-effort: surface a warning, then fall back
        st.warning(f"Model for {src_lang} to {tgt_lang} not available. Falling back to en-fr.")
        return _load_default_model()


# Preload the default model at import time so the first en-fr translation
# does not pay the download/initialization cost.
DEFAULT_TOKENIZER, DEFAULT_MODEL = _load_default_model()


def translate(text, source_lang, target_lang, max_length=400):
    """Translate *text* from *source_lang* to *target_lang*.

    Args:
        text: Text to translate. Empty, None, or whitespace-only input
            short-circuits with a prompt message instead of running the model.
        source_lang: Human-readable source language name (a LANGUAGES key);
            unknown names fall back to "en".
        target_lang: Human-readable target language name (a LANGUAGES key);
            unknown names fall back to "fr".
        max_length: Token cap applied when encoding the input (default 400,
            matching the original hard-coded limit).

    Returns:
        The translated string, or a prompt message when no text was given.
    """
    # Guard whitespace-only input too, not just "" / None.
    if not text or not text.strip():
        return "Please provide text to translate."
    src_code = LANGUAGES.get(source_lang, "en")
    tgt_code = LANGUAGES.get(target_lang, "fr")
    # load_model falls back to en-fr internally if the pair is unavailable.
    tokenizer, model = load_model(src_code, tgt_code)
    inputs = tokenizer(text, return_tensors="pt", padding=True,
                       truncation=True, max_length=max_length)
    translated = model.generate(**inputs)
    return tokenizer.decode(translated[0], skip_special_tokens=True)