Update translation.py
translation.py (+14 −21)
@@ -18,8 +18,7 @@ def _load_model_pair(source_lang, target_lang):
         model = MarianMTModel.from_pretrained(model_name)
         return tokenizer, model
     except Exception as e:
-
-        return None, None
+        return None, None  # Suppress error message, return None for fallback

 # Load all possible model combinations with caching
 @st.cache_resource
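For orientation, the loader this hunk tails off now signals a missing direct model with a (None, None) sentinel instead of reporting the error. A minimal sketch of the whole function, assuming the standard Helsinki-NLP/opus-mt-{src}-{tgt} checkpoint naming; the model_name construction is outside the hunk, so treat it as a guess:

    from transformers import MarianMTModel, MarianTokenizer

    def _load_model_pair(source_lang, target_lang):
        # Assumed naming convention; the hunk does not show how model_name is built.
        model_name = f"Helsinki-NLP/opus-mt-{source_lang}-{target_lang}"
        try:
            tokenizer = MarianTokenizer.from_pretrained(model_name)
            model = MarianMTModel.from_pretrained(model_name)
            return tokenizer, model
        except Exception:
            # New behavior: swallow the failure so callers can fall back to pivoting.
            return None, None

Callers now have to treat (None, None) as "no direct model for this pair"; the pivot path in the next hunk is what picks up that case.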
@@ -59,9 +58,8 @@ def combined_translate(text, source_lang, target_lang, default_tokenizer, defaul
             translated = inter_to_tgt_tokenizer.decode(inter_to_tgt_model.generate(**inter_to_tgt_tokenizer(inter_text, return_tensors="pt", padding=True, truncation=True, max_length=1000))[0], skip_special_tokens=True) if inter_to_tgt_tokenizer else inter_text
             return translated if translated.strip() else text
         return inter_text
-    except Exception as e:
-
-        return text
+    except Exception:
+        return text  # Suppress error, return source text

 # Class to handle combined translation
 class CombinedModel:
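The surrounding combined_translate implements pivot translation: source → intermediate language → target. The hunk only shows the second hop plus the new quiet failure path; below is a condensed sketch of the two-hop pattern, with the first-hop names (src_to_inter_*) invented for illustration and the real signature (which takes default_tokenizer/default_model) simplified:

    def pivot_translate(text,
                        src_to_inter_tokenizer, src_to_inter_model,
                        inter_to_tgt_tokenizer, inter_to_tgt_model):
        try:
            # Hop 1: source -> intermediate language (hypothetical names).
            batch = src_to_inter_tokenizer(text, return_tensors="pt", padding=True,
                                           truncation=True, max_length=1000)
            inter_text = src_to_inter_tokenizer.decode(
                src_to_inter_model.generate(**batch)[0], skip_special_tokens=True)

            # Hop 2: intermediate -> target, mirroring the line shown in the hunk.
            if inter_to_tgt_tokenizer:
                batch = inter_to_tgt_tokenizer(inter_text, return_tensors="pt", padding=True,
                                               truncation=True, max_length=1000)
                translated = inter_to_tgt_tokenizer.decode(
                    inter_to_tgt_model.generate(**batch)[0], skip_special_tokens=True)
            else:
                translated = inter_text
            return translated if translated.strip() else text
        except Exception:
            return text  # New behavior: fall back to the untranslated source text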
@@ -82,9 +80,8 @@ class CombinedModel:
             if not encoded_outputs:
                 return torch.tensor([])
             return torch.stack(encoded_outputs)  # Stack tensors to ensure proper shape
-        except Exception as e:
-
-            return torch.tensor([])
+        except Exception:
+            return torch.tensor([])  # Suppress error, return empty tensor

 # Load appropriate translation model with caching
 @st.cache_resource
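A caveat on the stacking line this hunk keeps: torch.stack requires every tensor in the list to have exactly the same shape, and generated token sequences usually differ in length, so the suppressed exception here is plausibly a shape-mismatch RuntimeError. A self-contained illustration:

    import torch

    same_length = [torch.tensor([1, 2, 3]), torch.tensor([4, 5, 6])]
    print(torch.stack(same_length).shape)  # torch.Size([2, 3])

    ragged = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
    try:
        torch.stack(ragged)
    except RuntimeError as err:
        # This is the kind of failure the except branch converts into torch.tensor([]).
        print("stack failed:", err)

Padding the sequences to a common length before stacking would avoid the empty-tensor fallback entirely, but that is beyond what this commit changes.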
@@ -104,9 +101,8 @@ def load_model(source_lang, target_lang):
             return pair1
         default_tokenizer, default_model = _load_default_model()
         return default_tokenizer, CombinedModel(source_lang, target_lang, default_tokenizer, default_model)
-    except Exception as e:
-
-        raise
+    except Exception:
+        raise  # Allow higher-level handling if needed

 # Load default translation model with caching
 @st.cache_resource
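This loader tries a direct model pair first and otherwise wraps the default model in CombinedModel. Note that a try/except whose only handler is a bare raise is functionally equivalent to having no handler at all; it mainly documents intent. A sketch of the control flow, assuming _load_model_pair returns (None, None) on failure as in the first hunk (the pair1 guard is a guess at code above the hunk):

    import streamlit as st

    @st.cache_resource
    def load_model(source_lang, target_lang):
        try:
            pair1 = _load_model_pair(source_lang, target_lang)
            if pair1[0] is not None:  # assumed guard; the hunk starts just below it
                return pair1
            default_tokenizer, default_model = _load_default_model()
            return default_tokenizer, CombinedModel(source_lang, target_lang,
                                                    default_tokenizer, default_model)
        except Exception:
            raise  # Unchanged semantics: let Streamlit surface the failure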
@@ -116,9 +112,8 @@ def _load_default_model():
         tokenizer = MarianTokenizer.from_pretrained(model_name)
         model = MarianMTModel.from_pretrained(model_name)
         return tokenizer, model
-    except Exception as e:
-
-        raise
+    except Exception:
+        raise  # Allow higher-level handling if needed

 # Translate text with caching
 @st.cache_data
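Worth noting while reading these hunks: the file uses st.cache_resource for the model/tokenizer loaders but st.cache_data for translate below. That split matches Streamlit's documented intent: cache_resource hands every caller the same live object (right for unpicklable models), while cache_data caches serializable return values per argument tuple. A toy illustration with hypothetical names:

    import streamlit as st

    @st.cache_resource
    def get_heavy_resource():
        # One shared instance per process; stand-in for MarianMTModel.from_pretrained(...)
        return {"model": object()}

    @st.cache_data
    def expensive_compute(text: str) -> str:
        # Result cached (and copied) per distinct `text`; stand-in for a translation
        return text.upper()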
@@ -136,10 +131,8 @@ def translate(text, source_lang, target_lang):
         if result and result.strip():
             return result
         else:
-
-
-
-
-
-            message = "This translation is not possible at this moment. Please try another language."
-            return f"{text} (Note: {message})"
+            st.warning("This translation is not possible at this moment. Please try another language.")
+            return text  # Return source text without additional note
+    except Exception:
+        st.warning("This translation is not possible at this moment. Please try another language.")
+        return text  # Return source text without additional note
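Two side effects of this final hunk are worth keeping in mind. First, st.warning now fires inside an @st.cache_data function, and whether Streamlit replays such elements on cache hits depends on the version in use. Second, the fallback return value is the untranslated source text, cached under the same key a successful translation would use. A condensed sketch of the resulting behavior (everything above the if is assumed, not shown in the diff):

    import streamlit as st

    @st.cache_data
    def translate(text, source_lang, target_lang):
        try:
            tokenizer, model = load_model(source_lang, target_lang)  # assumed call
            batch = tokenizer(text, return_tensors="pt", padding=True,
                              truncation=True, max_length=1000)
            result = tokenizer.decode(model.generate(**batch)[0],
                                      skip_special_tokens=True)
            if result and result.strip():
                return result
            else:
                st.warning("This translation is not possible at this moment. "
                           "Please try another language.")
                return text  # Cached: identical requests reuse this fallback value
        except Exception:
            st.warning("This translation is not possible at this moment. "
                       "Please try another language.")
            return text

One consequence of that caching: if the failure was transient (say, a model download hiccup), the cached source text is sticky for identical inputs until the cache is cleared, so a simple retry will not pick up a later successful load.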