import nltk

nltk.download('averaged_perceptron_tagger_eng')

import torch
import gradio as gr
import spaces
import capspeech.nar.generate as nar

# Load one checkpoint per downstream task: general style captions (CapTTS),
# emotion captions (EmoCapTTS), and accent captions (AccCapTTS).
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print(f"Device: {device}")
model_list = nar.load_model(device, "CapTTS")
emomodel_list = nar.load_model(device, "EmoCapTTS")
accmodel_list = nar.load_model(device, "AccCapTTS")

# Each handler maps one UI tab to its checkpoint and returns
# (sample_rate, waveform) for gr.Audio. The Space runs on ZeroGPU,
# so each handler requests a GPU slot via @spaces.GPU.
@spaces.GPU
def generate_audio_captts(transcript, caption, random_seed, fix_seed, duration, fix_duration, speed, step, cfg):
    if fix_seed:
        nar.seed_everything(random_seed)
    if not fix_duration:
        duration = None  # let the model predict the duration
    step = int(step)
    audio_arr = nar.run(model_list, device, duration, transcript, caption, speed, step, cfg)
    return 24000, audio_arr

@spaces.GPU
def generate_audio_emocaptts(transcript, caption, random_seed, fix_seed, duration, fix_duration, speed, step, cfg):
    if fix_seed:
        nar.seed_everything(random_seed)
    if not fix_duration:
        duration = None
    step = int(step)
    audio_arr = nar.run(emomodel_list, device, duration, transcript, caption, speed, step, cfg)
    return 24000, audio_arr

@spaces.GPU
def generate_audio_acccaptts(transcript, caption, random_seed, fix_seed, duration, fix_duration, speed, step, cfg):
    if fix_seed:
        nar.seed_everything(random_seed)
    if not fix_duration:
        duration = None
    step = int(step)
    audio_arr = nar.run(accmodel_list, device, duration, transcript, caption, speed, step, cfg)
    return 24000, audio_arr
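
# A minimal offline sketch (illustrative only, not called by the demo): invoke
# nar.run directly and save the clip with soundfile. The transcript, caption,
# and "example.wav" filename are placeholders; the argument order mirrors the
# handlers above, and duration=None lets the model predict its own length.
def _example_offline_generation():
    import soundfile as sf  # only needed for this sketch
    wav = nar.run(model_list, device, None,
                  "This is a quick smoke test of the CapTTS checkpoint.",
                  "A calm, low-pitched male voice at a measured pace.",
                  1.0, 32, 2.0)  # speed, flow steps, CFG scale
    sf.write("example.wav", wav, 24000)  # the handlers above treat output as 24 kHz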

# Example caption/transcript pairs shown under each tab.
examples = [
    ["A male speaker delivers his words at a measured pace, exhibiting a high-pitched, happy, and animated tone in a clean environment.", "We just released a new text to speech project today!"],
    ["A mature male voice, rough and husky, ideal for public speaking engagements.", "You are not your job. You are not how much money you have in the bank. You are not the car you drive. You're not the contents of your wallet."],
    ["A very young feminine, very cute voice, she speaks with a slight lisp.", "Where he got his authority, I don't know, but one thing I do know, he had none from me."],
    ["With a low-pitched voice, an elderly woman delivers her speech slowly and monotonously.", "From these genres and from these spaces, you know, and the feelings of what these games can bring."],
]

examples_acc = [
    ["An Indian-accented professional woman's voice for client and public interaction.", "In big countries, such small things keep happening."],
    ["A young girl's English-accented voice, suitable for customer service and public engagement roles.", "She don't listen to U K rap, if it ain't Dave, or Cench."],
    ["Bright teenage girl's voice, with an American accent, ideal for client and public interaction.", "If we live forever, can we really be said to live?"],
    ["A mature, deep, and rough Scottish-accented female voice, with a hint of weakness, ideal for client and public interaction in customer service or community relations.", "When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow."],
]

examples_emo = [
    ["She speaks in a calm, measured tone, conveying a sense of quiet, unassuming presence.", "All those moments will be lost in time, like tears in rain."],
    ["A middle-aged woman speaks in a low voice, her words dripping with disgust and annoyance.", "Why does your car smell like a dead RAT? It's absolutely vile."],
    ["A high-pitched, animated, happy male speaker delivers crisp, enunciated words at a fast speed.", "Wait a minute! I can't believe you just built a time machine!"],
    ["A young adult female, speaking in a slow, slightly low-pitched, monotone voice, exuding a profound void and quiet sadness.", "This is doctor Shaw, last survivor of the covenant. If you’re receiving this transmission, make no attempt to come to its point of origin."],
]

# CSS styling (optional): center the layout and cap its width.
css = """
#col-container {
    margin: 0 auto;
    max-width: 1280px;
}
"""

# Gradio UI
with gr.Blocks(css=css, theme=gr.themes.Base()) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
        ## CapSpeech: A Prompt-Guided Expressive Text-to-Speech Synthesizer

        👋 Welcome to the 🧢CapSpeech live demo.

        🔗 Learn more about this project on the [🧢CapSpeech Homepage](https://wanghelin1997.github.io/CapSpeech-demo/).

        📃 Licensed under CC BY-NC 4.0.

        **🔧 Usage Tips**
        - Quick Start: Enter a style caption and a transcript to generate expressive speech just the way you want.
        - Model Tabs: Switch between checkpoints by clicking a tab; each checkpoint is tailored to a specific downstream use case.
        - Speed/Duration Settings: Adjust the speed or set a fixed duration if the predicted speech pace sounds unnatural.
        - Flow Matching Settings: Adjust the CFG scale and the number of sampling steps to refine prompt alignment and generation quality.
        """)
with gr.Tab("Style-Captioned Text-to-Speech"): | |
gr.Markdown("""This checkpoint offers balanced performance and supports general style control.""") | |
# with gr.Row(equal_height=True): | |
caption_input = gr.Textbox( | |
label="Voice Style Caption", | |
info="How should the speaker sound? Think timbre, pace, emotion, accent, etc.", | |
placeholder="Describe the desired speaking style...", | |
lines=2, | |
value="A mature male voice, rough and husky, ideal for public speaking engagements." | |
# scale=3, | |
) | |
transcript_input = gr.Textbox( | |
label="Speech Transcript/Content", | |
info="What should the speaker say?", | |
placeholder="Enter speech content...", | |
lines=2, | |
# scale=4, | |
value="You are not your job. You are not how much money you have in the bank. You are not the car you drive. You're not the contents of your wallet." | |
) | |
run_button = gr.Button("Generate", scale=1, variant="primary") | |
result = gr.Audio(label="Generated Audio", type="numpy") | |
with gr.Accordion("Speed/Duration Settings", open=True): | |
speed_ratio = gr.Slider(minimum=0.5, | |
maximum=2, | |
step=0.1, | |
value=1, | |
scale=2, | |
label="Speed", | |
info="Scale the duration predicted by the model.") | |
audio_length = gr.Slider(minimum=1, maximum=20, step=1, value=0, | |
label="Audio Duration", | |
info='Manually set an audio duration.') | |
fix_length = gr.Checkbox(label="Fix Audio Duration", | |
info="Enable to use a fixed audio duration.", | |
value=False) | |
with gr.Accordion("Flow Matching Settings", open=False): | |
seed = gr.Slider(minimum=0, maximum=100, step=1, value=42, label="Seed") | |
fix_seed = gr.Checkbox(label="Fix Seed", | |
info="Enable to use a fixed random seed for reproducibility.", | |
value=False) | |
cfg = gr.Slider(minimum=1.0, maximum=5.0, step=0.5, value=2.0, label="CFG Scale") | |
step = gr.Slider(minimum=20, maximum=100, step=1, value=32, label="Flow Steps") | |
gr.Examples( | |
examples=examples, | |
inputs=[caption_input, transcript_input] | |
) | |
            run_button.click(
                fn=generate_audio_captts,
                inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg],
                outputs=[result]
            )
            transcript_input.submit(
                fn=generate_audio_captts,
                inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg],
                outputs=[result]
            )
with gr.Tab("Accent-Captioned Text-to-Speech"): | |
gr.Markdown("""This checkpoint is specifically fine-tuned for enhanced accent control.""") | |
# with gr.Row(equal_height=True): | |
caption_input = gr.Textbox( | |
label="Voice Style Caption", | |
info="How should the speaker sound? Think timbre, pace, emotion, accent, etc.", | |
placeholder="Describe the desired speaking style...", | |
lines=2, | |
value="A young girl's English-accented voice, suitable for customer service and public engagement roles." | |
# scale=3 | |
) | |
# with gr.Row(equal_height=True): | |
transcript_input = gr.Textbox( | |
label="Speech Transcript/Content", | |
info="What should the speaker say?", | |
placeholder="Enter speech content...", | |
lines=2, | |
value="She don't listen to U K rap, if it ain't Dave, or Cench." | |
# scale=4 | |
) | |
run_button = gr.Button("Generate", scale=1, variant="primary") | |
result = gr.Audio(label="Generated Audio", type="numpy") | |
with gr.Accordion("Speed/Duration Settings", open=True): | |
speed_ratio = gr.Slider(minimum=0.5, | |
maximum=2, | |
step=0.1, | |
value=1, | |
scale=2, | |
label="Speed", | |
info="Scale the duration predicted by the model.") | |
audio_length = gr.Slider(minimum=1, maximum=20, step=1, value=0, | |
label="Audio duration", | |
info='Manually set an audio duration.') | |
fix_length = gr.Checkbox(label="Fix Audio Duration", | |
info="Enable to use a fixed audio duration.", | |
value=False) | |
with gr.Accordion("Flow Matching Settings", open=False): | |
seed = gr.Slider(minimum=0, maximum=100, step=1, value=42, label="Seed") | |
fix_seed = gr.Checkbox(label="Fix Seed", | |
info="Enable to use a fixed random seed for reproducibility.", | |
value=False) | |
cfg = gr.Slider(minimum=1.0, maximum=5.0, step=0.5, value=2.0, label="CFG Scale") | |
step = gr.Slider(minimum=20, maximum=100, step=1, value=32, label="Flow Steps") | |
gr.Examples( | |
examples=examples_acc, | |
inputs=[caption_input, transcript_input] | |
) | |
run_button.click( | |
fn=generate_audio_acccaptts, | |
inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg], | |
outputs=[result] | |
) | |
transcript_input.submit( | |
fn=generate_audio_acccaptts, | |
inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg], | |
outputs=[result] | |
) | |
with gr.Tab("Emotion-Captioned Text-to-Speech"): | |
gr.Markdown("""This checkpoint is specifically fine-tuned to enhance emotional expressiveness.""") | |
# with gr.Row(equal_height=True): | |
caption_input = gr.Textbox( | |
label="Voice Style Caption", | |
info="How should the speaker sound? Think timbre, pace, emotion, accent, etc.", | |
placeholder="Describe the desired speaking style...", | |
lines=2, | |
value="A middle-aged woman speaks in a low voice, her words dripping with disgust and annoyance." | |
# scale=3 | |
) | |
# with gr.Row(equal_height=True): | |
transcript_input = gr.Textbox( | |
label="Speech Transcript/Content", | |
info="What should the speaker say?", | |
placeholder="Enter speech content...", | |
lines=2, | |
value="Why does your car smell like a dead RAT? It's absolutely vile." | |
# scale=4 | |
) | |
run_button = gr.Button("Generate", scale=1, variant="primary") | |
result = gr.Audio(label="Generated Audio", type="numpy") | |
with gr.Accordion("Speed/Duration Settings", open=True): | |
speed_ratio = gr.Slider(minimum=0.5, | |
maximum=2, | |
step=0.1, | |
value=1, | |
scale=2, | |
label="Speed", | |
info="Scale the duration predicted by the model.") | |
audio_length = gr.Slider(minimum=1, maximum=20, step=1, value=0, | |
label="Audio Duration", | |
info='Manually set an audio duration.') | |
fix_length = gr.Checkbox(label="Fix Audio Duration", | |
info="Enable to use a fixed audio duration.", | |
value=False) | |
with gr.Accordion("Flow Matching Settings", open=False): | |
seed = gr.Slider(minimum=0, maximum=100, step=1, value=42, label="Seed") | |
fix_seed = gr.Checkbox(label="Fix Seed", | |
info="Enable to use a fixed random seed for reproducibility.", | |
value=False) | |
cfg = gr.Slider(minimum=1.0, maximum=5.0, step=0.5, value=2.0, label="CFG Scale") | |
step = gr.Slider(minimum=20, maximum=100, step=1, value=32, label="Flow Steps") | |
gr.Examples( | |
examples=examples_emo, | |
inputs=[caption_input, transcript_input] | |
) | |
run_button.click( | |
fn=generate_audio_emocaptts, | |
inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg], | |
outputs=[result] | |
) | |
transcript_input.submit( | |
fn=generate_audio_emocaptts, | |
inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg], | |
outputs=[result] | |
) | |

demo.launch()
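
# Deployment note (a sketch; only relevant when self-hosting outside Hugging Face
# Spaces, where queueing is not managed for you): enabling Gradio's request queue
# keeps long flow-matching generations from hitting connection timeouts, e.g.
#   demo.queue(max_size=8).launch(server_name="0.0.0.0")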