import streamlit as st
from transformers import MarianTokenizer, MarianMTModel


# Preload default model for English to French
@st.cache_resource
def _load_default_model():
    """Load default MarianMT model (en-fr)."""
    model_name = "Helsinki-NLP/opus-mt-en-fr"
    tokenizer = MarianTokenizer.from_pretrained(model_name)
    model = MarianMTModel.from_pretrained(model_name)
    return tokenizer, model

# Cache other models dynamically
@st.cache_resource
def load_model(src_lang, tgt_lang):
    """Load a MarianMT model for a specific language pair."""
    model_name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
    try:
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        return tokenizer, model
    except Exception as e:
        # Not every language pair has a pretrained opus-mt checkpoint on the Hub.
        raise RuntimeError(f"Model for {src_lang} to {tgt_lang} not available: {e}") from e
# Preload default model globally
DEFAULT_TOKENIZER, DEFAULT_MODEL = _load_default_model()
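# Warming the en-fr pair at import keeps the most common request fast;
# other pairs are downloaded and cached on first use by load_model().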

def translate(text, source_lang, target_lang):
    """Translate text from the source language to the target language."""
    if not text:
        return "Please provide text to translate."
    src_code = LANGUAGES.get(source_lang)
    tgt_code = LANGUAGES.get(target_lang)
    # Guard against language names missing from the LANGUAGES dictionary below
    if not src_code or not tgt_code:
        return "Unsupported language selection."
    # Use preloaded model if en-fr, else load dynamically
    if src_code == "en" and tgt_code == "fr":
        tokenizer, model = DEFAULT_TOKENIZER, DEFAULT_MODEL
    else:
        tokenizer, model = load_model(src_code, tgt_code)
    # Perform translation
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=400)
    translated = model.generate(**inputs)
    return tokenizer.decode(translated[0], skip_special_tokens=True)

# Language dictionary (limited for speed)
LANGUAGES = {
    "English": "en",
    "French": "fr",
    "Spanish": "es",
    "German": "de",
    "Chinese": "zh",
    "Arabic": "ar",
    "Russian": "ru",
    "Hindi": "hi",
    "Japanese": "ja"
}
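

# Illustrative usage sketch (an assumption, not part of the original Streamlit app):
# the UI would normally pass language names chosen from LANGUAGES into translate().
# Running this file directly exercises the same code path; outside a Streamlit
# session the cached functions still execute (Streamlit may only log a warning).
if __name__ == "__main__":
    # Hypothetical smoke test; requires the Helsinki-NLP checkpoints to be downloadable.
    print(translate("Hello, how are you?", "English", "French"))  # preloaded en-fr path
    print(translate("Good morning", "English", "Spanish"))  # dynamically loaded en-es path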