import streamlit as st
from transformers import MarianTokenizer, MarianMTModel


@st.cache_resource
def _load_default_model():
    """Load default MarianMT model (en-fr)."""
    model_name = "Helsinki-NLP/opus-mt-en-fr"
    tokenizer = MarianTokenizer.from_pretrained(model_name)
    model = MarianMTModel.from_pretrained(model_name)
    return tokenizer, model


@st.cache_resource
def load_model(src_lang, tgt_lang):
    """Load a MarianMT model for a specific language pair."""
    model_name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
    try:
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        return tokenizer, model
    except Exception as e:
        raise ValueError(f"Model for {src_lang} to {tgt_lang} is not available: {e}") from e


# Preload the default English -> French pair so the most common case is ready at import time.
DEFAULT_TOKENIZER, DEFAULT_MODEL = _load_default_model()


def translate(text, source_lang, target_lang):
    """Translate text from the source language to the target language."""
    if not text:
        return "Please provide text to translate."

    src_code = LANGUAGES.get(source_lang)
    tgt_code = LANGUAGES.get(target_lang)
    if src_code is None or tgt_code is None:
        return f"Translation from {source_lang} to {target_lang} is not supported."

    # Reuse the preloaded en-fr model; other pairs are loaded on demand and cached.
    if src_code == "en" and tgt_code == "fr":
        tokenizer, model = DEFAULT_TOKENIZER, DEFAULT_MODEL
    else:
        tokenizer, model = load_model(src_code, tgt_code)

    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=400)
    translated = model.generate(**inputs)
    return tokenizer.decode(translated[0], skip_special_tokens=True)


# Display names mapped to the ISO 639-1 codes used in MarianMT model names.
LANGUAGES = {
    "English": "en",
    "French": "fr",
    "Spanish": "es",
    "German": "de",
    "Chinese": "zh",
    "Arabic": "ar",
    "Russian": "ru",
    "Hindi": "hi",
    "Japanese": "ja",
}
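

# --- Example usage (sketch) ---
# A minimal Streamlit front end wired to translate(), shown only to illustrate
# how the pieces above fit together. The widget labels and layout below are
# illustrative assumptions, not part of the original app, so the sketch is
# left commented out.
#
# source_lang = st.selectbox("Source language", list(LANGUAGES.keys()), index=0)
# target_lang = st.selectbox("Target language", list(LANGUAGES.keys()), index=1)
# text = st.text_area("Text to translate")
# if st.button("Translate"):
#     st.write(translate(text, source_lang, target_lang))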