import spaces
import gradio as gr
import torch
from string import punctuation
import re

from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed
from num2words import num2words

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.bfloat16
assert device == "cuda:0", "You really do not want to run this on a CPU"

# attn_implementation = "flash_attention_2"
# compilation_mode = "reduce-overhead"
# max_input_length_tokens = 64  # Note: text tokens
max_output_length_tokens = 128 * 15  # Note: audio tokens, ~128 per second (~15 s cap)

repo_id = "parler-tts/parler-tts-mini-multilingual-v1.1"

model = ParlerTTSForConditionalGeneration.from_pretrained(
    repo_id,
    torch_dtype=torch_dtype,
    # attn_implementation=attn_implementation,
    attn_implementation="eager",
    device_map=device,
)
text_tokenizer = AutoTokenizer.from_pretrained(repo_id)
description_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")
feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)

SAMPLE_RATE = feature_extractor.sampling_rate
SEED = 42
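# The commented-out compilation_mode above hints at torch.compile. A rough, untested sketch of how
# compilation could be enabled for the model loaded above (static KV cache plus a compiled forward
# is the pattern suggested in the Parler-TTS docs, but the exact recipe for this checkpoint may
# differ); uncomment to experiment:
#
# model.generation_config.cache_implementation = "static"
# model.forward = torch.compile(model.forward, mode="reduce-overhead")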
default_text = "Entender e responder em audio é outro nível"
default_description = (
    "Sophia's voice is monotone yet slightly fast in delivery, "
    "with a very close recording that almost has no background noise."
)

examples = [
    [
        "Entender e responder em audio é outro nível",
        "a woman with a slightly low-pitched voice speaks slowly in a clear and close-sounding environment, but her delivery is quite monotone.",
    ],
    [
        "Entender e responder em audio é outro nível",
        "Sophia's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise.",
    ],
    [
        "isso é uma solução que teria muito valor pra nós",
        "Sophia's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise.",
    ],
    [
        "isso é uma solução que teria muito valor pra nós",
        "Nicholas's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise.",
    ],
    [
        "As vezes tem uns sotaques meio bizarros, claro",
        "Nicholas's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise.",
    ],
    [
        "As vezes tem uns sotaques meio bizarros, claro",
        "a man speaks slowly in a distant-sounding environment with a clean audio quality, delivering his message in a monotone voice at a moderate pitch.",
    ],
    [
        "Mas em geral foi bem bom",
        "a man speaks slowly in a distant-sounding environment with a clean audio quality, delivering his message in a monotone voice at a moderate pitch.",
    ],
    [
        "Mas em geral foi bem bom",
        "A female speaker delivers a slightly expressive and animated speech with a moderate speed and pitch. The recording is of very high quality, with the speaker's voice sounding clear and very close up.",
    ],
]

# Matches an optional currency prefix ("R$" or "RS", group "moeda") followed by a number (group "numero").
NUMBER_PATTERN = re.compile(r"\b(?P<moeda>R[S\$]\s*)?(?P<numero>\d+([\,\._]\d+)?)\b")
ABBREVIATION_PATTERN = r"\b[A-Z][A-Z\.]+\b"


def preprocess(text: str):
    text = text.strip()
    text = text.replace("-", " ")

    def separate_abb(chunk):
        # Spell out an all-caps abbreviation letter by letter, e.g. "ONU" -> "O N U".
        chunk = chunk.replace(".", "")
        return " ".join(chunk)

    # Expand digits into Brazilian Portuguese words; amounts with a currency prefix are read as currency.
    for number in re.finditer(NUMBER_PATTERN, text):
        before = number.string[slice(*number.span())]
        after = num2words(
            number.group("numero").replace(",", "."),
            lang="pt_BR",
            to="currency" if number.group("moeda") else "cardinal",
        )
        text = text.replace(before, after, 1)

    for abv in re.findall(ABBREVIATION_PATTERN, text):
        if abv in text:
            text = text.replace(abv, separate_abb(abv), 1)

    if text[-1] not in punctuation:
        text = f"{text}."
    return text.strip()
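# Illustrative preprocess() behaviour (a sketch of expected outputs -- the exact wording of the
# number/currency expansion depends on the installed num2words version):
#
#   preprocess("O ingresso custa R$ 50")
#   # -> roughly "O ingresso custa cinquenta reais."
#   preprocess("A ONU publicou 3 relatórios")
#   # -> roughly "A O N U publicou três relatórios."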

@spaces.GPU
def gen_tts(text, description):
    inputs = description_tokenizer(description.strip(), return_tensors="pt").to(device)
    prompt = text_tokenizer(preprocess(text), return_tensors="pt").to(device)

    set_seed(SEED)
    generation = model.generate(
        input_ids=inputs.input_ids,
        prompt_input_ids=prompt.input_ids,
        attention_mask=inputs.attention_mask,
        prompt_attention_mask=prompt.attention_mask,
        do_sample=True,
        temperature=1.0,
        min_new_tokens=10,
        max_new_tokens=max_output_length_tokens,
    )
    audio_arr = generation.to(torch.float32).cpu().numpy().squeeze()  # type: ignore
    return (SAMPLE_RATE, audio_arr)
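# A minimal way to exercise gen_tts() outside the Gradio UI (a sketch; assumes the optional
# soundfile package is installed and that the CUDA device asserted above is available).
# Uncomment to write a one-off sample to disk:
#
#   import soundfile as sf
#   sr, wav = gen_tts(default_text, default_description)
#   sf.write("sample.wav", wav, sr)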
css = """
#share-btn-container {
    display: flex;
    padding-left: 0.5rem !important;
    padding-right: 0.5rem !important;
    background-color: #000000;
    justify-content: center;
    align-items: center;
    border-radius: 9999px !important;
    width: 13rem;
    margin-top: 10px;
    margin-left: auto;
    flex: unset !important;
}
#share-btn {
    all: initial;
    color: #ffffff;
    font-weight: 600;
    cursor: pointer;
    font-family: 'IBM Plex Sans', sans-serif;
    margin-left: 0.5rem !important;
    padding-top: 0.25rem !important;
    padding-bottom: 0.25rem !important;
    right: 0;
}
#share-btn * {
    all: unset !important;
}
#share-btn-container div:nth-child(-n+2) {
    width: auto !important;
    min-height: 0px !important;
}
#share-btn-container .wrap {
    display: none !important;
}
"""

with gr.Blocks(css=css) as block:
    gr.HTML(
        """
        <h1>Multilingual Parler-TTS 1.1 🗣️</h1>
        """
    )
    gr.HTML(
        """
        <p>Parler-TTS is a training and inference library for high-fidelity text-to-speech (TTS) models.</p>
        <p>This multilingual model supports French, Spanish, Italian, Portuguese, Polish, German, Dutch, and English.
        It generates high-quality speech with features that can be controlled using a simple text prompt.</p>
        <p>By default, Parler-TTS generates 🎲 random voice characteristics. To ensure 🎯 speaker consistency across
        generations, use consistent descriptions in your prompts.</p>
        """
    )
    gr.HTML(
        """
        <p>Based on PHBJT/multi_parler_tts, updated to the 1.1 model and changed to use <code>num2words</code>
        to expand numbers in Brazilian Portuguese.</p>
        """
    )
    with gr.Row():
        with gr.Column():
            gradio_input_text = gr.Textbox(label="Input Text", lines=2, value=default_text)
            gradio_description = gr.Textbox(label="Voice Description", lines=2, value=default_description)
            generate_button = gr.Button("Generate Audio", variant="primary")
        with gr.Column():
            audio_out = gr.Audio(label="Parler-TTS generation", type="numpy", show_download_button=True)

    generate_button.click(
        fn=gen_tts,
        inputs=[gradio_input_text, gradio_description],
        outputs=[audio_out],
    )

    gr.Examples(
        examples=examples,
        fn=gen_tts,
        inputs=[gradio_input_text, gradio_description],
        outputs=[audio_out],
        cache_examples=True,
    )

    gr.HTML(
        """
        <p>Tips for ensuring good generation:</p>
        """
    )

block.queue()
block.launch(share=True)