"""Translation logic built on a Hugging Face seq2seq checkpoint."""

import functools

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Checkpoint used for translation. T5 is a text-to-text model driven by a
# task prefix inside the input string — see translate_text's docstring.
_MODEL_NAME = "t5-small"


@functools.lru_cache(maxsize=1)
def _load_model_and_tokenizer(model_name=_MODEL_NAME):
    """Load and cache the tokenizer/model pair.

    The original code reloaded both from disk (or the network) on every
    call to translate_text; caching makes repeated calls cheap.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    model.eval()  # inference only — disables dropout and similar layers
    return tokenizer, model


def translate_text(text, max_length=512):
    """Translate *text* using the t5-small checkpoint.

    NOTE: T5 expects a task prefix in the input itself, e.g.
    "translate English to French: Hello". Without a prefix the model's
    output is unspecified — callers should include one.

    Args:
        text: Input string (including task prefix) to feed the model.
        max_length: Maximum input length in tokens; longer input is
            truncated (matches the original hard-coded 512).

    Returns:
        The decoded generation for the first (only) sequence, with
        special tokens stripped.
    """
    tokenizer, model = _load_model_and_tokenizer()
    # Calling the tokenizer directly is the modern replacement for the
    # deprecated encode_plus; it adds special tokens and returns the
    # attention mask by default, as the original call did explicitly.
    inputs = tokenizer(
        text,
        max_length=max_length,
        truncation=True,
        return_tensors="pt",
    )
    # no_grad: inference needs no autograd graph — saves memory and time.
    with torch.no_grad():
        outputs = model.generate(**inputs)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)