Krishna086 committed on
Commit
ff2efaa
·
verified ·
1 Parent(s): 395199d

Update translation.py

Browse files
Files changed (1) hide show
  1. translation.py +4 -3
translation.py CHANGED
@@ -19,7 +19,7 @@ def load_model(source_lang, target_lang):
19
  model = MarianMTModel.from_pretrained(model_name)
20
  return tokenizer, model
21
  except Exception as e:
22
- st.warning(f"No direct model for {source_lang} to {target_lang}. Using cached en-fr.")
23
  return _load_default_model()
24
 
25
  @st.cache_data(ttl=3600)
@@ -31,8 +31,9 @@ def translate_cached(text, source_lang, target_lang):
31
  tokenizer, model = load_model(src_code, tgt_code)
32
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
33
  with torch.no_grad():
34
- translated = model.generate(**inputs, max_length=500, num_beams=2, early_stopping=True) # Reduced beams for speed
35
- return tokenizer.decode(translated[0], skip_special_tokens=True)
 
36
 
37
  def translate(text, source_lang, target_lang):
38
  if not text:
 
19
  model = MarianMTModel.from_pretrained(model_name)
20
  return tokenizer, model
21
  except Exception as e:
22
+ st.warning(f"No direct model for {source_lang} to {target_lang}. Falling back to English buffer.")
23
  return _load_default_model()
24
 
25
  @st.cache_data(ttl=3600)
 
31
  tokenizer, model = load_model(src_code, tgt_code)
32
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
33
  with torch.no_grad():
34
+ translated = model.generate(**inputs, max_length=500, num_beams=2, early_stopping=True)
35
+ translated_text = tokenizer.decode(translated[0], skip_special_tokens=True)
36
+ return translated_text if translated_text.strip() else text # Fallback to input if empty
37
 
38
  def translate(text, source_lang, target_lang):
39
  if not text: