import spaces
import gradio as gr
import torch
from string import punctuation
import re
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed
from num2words import num2words
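# Pick the GPU when available and load weights in bfloat16; the assert below enforces GPU-only use.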
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.bfloat16
assert device == "cuda:0", "You really do not want to run this on a CPU"
# attn_implementation = "flash_attention_2"
# compilation_mode = "reduce-overhead"
# max_input_length_tokens = 64 # Note: Text tokens
max_output_length_tokens = 128 * 15 # Note: Audio tokens, ~128 per sec
repo_id = "parler-tts/parler-tts-mini-multilingual-v1.1"
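# Load the multilingual Parler-TTS checkpoint; attention falls back to "eager" since flash_attention_2 is left commented out.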
model = ParlerTTSForConditionalGeneration.from_pretrained(
    repo_id,
    torch_dtype=torch_dtype,
    # attn_implementation=attn_implementation,
    attn_implementation="eager",
    device_map=device,
)
text_tokenizer = AutoTokenizer.from_pretrained(repo_id)
description_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")
feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
SAMPLE_RATE = feature_extractor.sampling_rate
SEED = 42
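# A fixed seed (set before every generation) keeps the sampled voice reproducible for identical inputs.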
default_text = "Entender e responder em audio é outro nível"
default_description = "Sophia's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise."
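# Clickable (prompt, voice description) example pairs shown in the UI; prompts are Brazilian Portuguese.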
examples = [
    [
        "Entender e responder em audio é outro nível",
        "a woman with a slightly low-pitched voice speaks slowly in a clear and close-sounding environment, but her delivery is quite monotone.",
    ],
    [
        "Entender e responder em audio é outro nível",
        "Sophia's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise.",
    ],
    [
        "isso é uma solução que teria muito valor pra nós",
        "Sophia's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise.",
    ],
    [
        "isso é uma solução que teria muito valor pra nós",
        "Nicholas's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise.",
    ],
    [
        "As vezes tem uns sotaques meio bizarros, claro",
        "Nicholas's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise.",
    ],
    [
        "As vezes tem uns sotaques meio bizarros, claro",
        "a man speaks slowly in a distant-sounding environment with a clean audio quality, delivering his message in a monotone voice at a moderate pitch.",
    ],
    [
        "Mas em geral foi bem bom",
        "a man speaks slowly in a distant-sounding environment with a clean audio quality, delivering his message in a monotone voice at a moderate pitch.",
    ],
    [
        "Mas em geral foi bem bom",
        "A female speaker delivers a slightly expressive and animated speech with a moderate speed and pitch. The recording is of very high quality, with the speaker's voice sounding clear and very close up.",
    ],
]
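# NUMBER_PATTERN captures an optional "R$"/"RS" currency prefix plus a number with an optional
# decimal separator; ABBREVIATION_PATTERN matches all-caps (optionally dotted) acronyms.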
NUMBER_PATTERN = re.compile(r"\b(?P<moeda>R[S\$]\s*)?(?P<numero>\d+([\,\._]\d+)?)\b")
ABBREVIATION_PATTERN = r"\b[A-Z][A-Z\.]+\b"
def preprocess(text: str):
    """Expand numbers into Brazilian Portuguese words and spell out acronyms before TTS."""
    text = text.strip()
    text = text.replace("-", " ")

    def separate_abb(chunk):
        # Spell an acronym out letter by letter, e.g. "TTS" -> "T T S".
        chunk = chunk.replace(".", "")
        return " ".join(chunk)

    # Replace digits (and optional R$ amounts) with their spoken form.
    for number in re.finditer(NUMBER_PATTERN, text):
        before = number.string[slice(*number.span())]
        after = num2words(
            number.group("numero").replace(",", "."),
            lang="pt_BR",
            to="currency" if number.group("moeda") else "cardinal",
        )
        text = text.replace(before, after, 1)

    # Spell out all-caps abbreviations.
    for abv in re.findall(ABBREVIATION_PATTERN, text):
        if abv in text:
            text = text.replace(abv, separate_abb(abv), 1)

    # Ensure the prompt ends with punctuation.
    if text[-1] not in punctuation:
        text = f"{text}."
    return text.strip()
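# Example: preprocess("Custa R$ 10") should produce something like "Custa dez reais."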
@spaces.GPU
def gen_tts(text, description):
    # Tokenize the voice description (conditioning input) and the preprocessed prompt text.
    inputs = description_tokenizer(description.strip(), return_tensors="pt").to(device)
    prompt = text_tokenizer(preprocess(text), return_tensors="pt").to(device)

    set_seed(SEED)
    generation = model.generate(
        input_ids=inputs.input_ids,
        prompt_input_ids=prompt.input_ids,
        attention_mask=inputs.attention_mask,
        prompt_attention_mask=prompt.attention_mask,
        do_sample=True,
        temperature=1.0,
        min_new_tokens=10,
        max_new_tokens=max_output_length_tokens,
    )
    audio_arr = generation.to(torch.float32).cpu().numpy().squeeze()  # type: ignore
    return (SAMPLE_RATE, audio_arr)
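# Custom CSS (passed to gr.Blocks below) that styles the share-button widgets.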
css = """
#share-btn-container {
    display: flex;
    padding-left: 0.5rem !important;
    padding-right: 0.5rem !important;
    background-color: #000000;
    justify-content: center;
    align-items: center;
    border-radius: 9999px !important;
    width: 13rem;
    margin-top: 10px;
    margin-left: auto;
    flex: unset !important;
}
#share-btn {
    all: initial;
    color: #ffffff;
    font-weight: 600;
    cursor: pointer;
    font-family: 'IBM Plex Sans', sans-serif;
    margin-left: 0.5rem !important;
    padding-top: 0.25rem !important;
    padding-bottom: 0.25rem !important;
    right: 0;
}
#share-btn * {
    all: unset !important;
}
#share-btn-container div:nth-child(-n+2){
    width: auto !important;
    min-height: 0px !important;
}
#share-btn-container .wrap {
    display: none !important;
}
"""
with gr.Blocks(css=css) as block:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 700px; margin: 0 auto;">
          <div style="display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;">
            <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
              Multilingual Parler-TTS 1.1 🗣️
            </h1>
          </div>
        </div>
        """
    )
    gr.HTML(
        """<p><a href="https://github.com/huggingface/parler-tts">Parler-TTS</a> is a training and inference library for
        high-fidelity text-to-speech (TTS) models.</p>
        <p>This <a href="https://huggingface.co/parler-tts/parler-tts-mini-multilingual-v1.1">multilingual model</a> supports French, Spanish, Italian, Portuguese, Polish, German, Dutch, and English. It generates high-quality speech with features that can be controlled using a simple text prompt.</p>
        <p>By default, Parler-TTS generates 🎲 random voice characteristics. To ensure 🎯 <b>speaker consistency</b> across generations, try to use consistent descriptions in your prompts.</p>"""
    )
    gr.HTML(
        """<p>Based on <a href="https://huggingface.co/spaces/PHBJT/multi_parler_tts">PHBJT/multi_parler_tts</a>, updated to the 1.1 model and changed to use `num2words` to expand numbers in Brazilian Portuguese.</p>"""
    )
    with gr.Row():
        with gr.Column():
            gradio_input_text = gr.Textbox(
                label="Input Text", lines=2, value=default_text
            )
            gradio_description = gr.Textbox(
                label="Voice Description", lines=2, value=default_description
            )
            generate_button = gr.Button("Generate Audio", variant="primary")
        with gr.Column():
            audio_out = gr.Audio(label="Parler-TTS generation", type="numpy", show_download_button=True)

    generate_button.click(
        fn=gen_tts, inputs=[gradio_input_text, gradio_description], outputs=[audio_out]
    )

    gr.Examples(
        examples=examples,
        fn=gen_tts,
        inputs=[gradio_input_text, gradio_description],
        outputs=[audio_out],
        cache_examples=True,
    )

    gr.HTML(
        """<p>Tips for ensuring good generation:
        <ul>
          <li>Include the term "very clear audio" to generate the highest quality audio, and "very noisy audio" for high levels of background noise</li>
          <li>Punctuation can be used to control the prosody of the generations</li>
          <li>The remaining speech features (gender, speaking rate, pitch and reverberation) can be controlled directly through the prompt</li>
        </ul>
        </p>"""
    )
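# Queue incoming requests and launch the demo.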
block.queue()
block.launch(share=True)