# voice/src/infer_drt.py
import os
import torch
import gradio as gr
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
# Model on Hugging Face
MODEL_ID = "DRDELATV/drtd"
TOKEN = os.getenv("HF_TOKEN")  # ← token loaded from a .env file or environment variable
print("πŸ” Cargando modelo DRTD desde Hugging Face...")
processor = AutoProcessor.from_pretrained(MODEL_ID, token=TOKEN)
model = AutoModelForSpeechSeq2Seq.from_pretrained(MODEL_ID, token=TOKEN)
model.eval()
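# Optional sketch (an assumption, not part of the original script): move the model to a GPU
# when one is available. If enabled, the tensors returned by `processor` inside
# `inferir_audio` would also need to be moved with `.to(device)`.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)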
def inferir_audio(texto):
    # Tokenize the input text and generate output tokens with the seq2seq model.
    inputs = processor(text=texto, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(**inputs)
    # batch_decode returns decoded text (not a raw waveform), which the Textbox below displays.
    resultado = processor.batch_decode(outputs, skip_special_tokens=True)[0]
    print(f"🎤 Synthesized text: {texto}")
    return resultado
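# Optional smoke test (a sketch; the DRTD_SMOKE_TEST variable is hypothetical): uncomment to
# run a single inference from the terminal before launching the UI.
# if os.getenv("DRTD_SMOKE_TEST"):
#     print(inferir_audio("hola mundo"))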
demo = gr.Interface(
    fn=inferir_audio,
    inputs=gr.Textbox(label="📝 Write something..."),
    outputs=gr.Textbox(label="🔊 Generated result"),
    title="🐽 TTS DRTD Kuchiyuya",
    description="Converts text to speech using the custom model trained on Hugging Face",
)
if __name__ == "__main__":
    demo.launch(share=True)
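# Usage (a sketch, assuming HF_TOKEN grants read access to DRDELATV/drtd):
#   export HF_TOKEN=hf_xxx          # personal access token with read access
#   python voice/src/infer_drt.py   # starts the Gradio UI; share=True also prints a public link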