import nltk
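# POS-tagger data used by the text frontend (G2P) when converting text to phonemes.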
nltk.download('averaged_perceptron_tagger_eng')
import os
import torch
import random
import numpy as np
import gradio as gr
import librosa
import spaces
import capspeech.nar.generate as nar
from transformers import AutoTokenizer, set_seed
import soundfile as sf
import time
from huggingface_hub import snapshot_download


# Load the three CapSpeech checkpoints: CapTTS (general style captions),
# EmoCapTTS (emotion-focused) and AccCapTTS (accent-focused).
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print(f"Device: {device}")
model_list = nar.load_model(device, "CapTTS")
emomodel_list = nar.load_model(device, "EmoCapTTS")
accmodel_list = nar.load_model(device, "AccCapTTS")

def _generate_audio(models, transcript, caption, random_seed, fix_seed, duration, fix_duration, speed, step, cfg):
    # Shared synthesis routine for all three checkpoints.
    if fix_seed:
        nar.seed_everything(random_seed)

    if not fix_duration:
        duration = None  # let the model predict a natural duration

    audio_arr = nar.run(models, device, duration, transcript, caption, speed, int(step), cfg)

    # CapSpeech synthesizes audio at a 24 kHz sample rate.
    return 24000, audio_arr

@spaces.GPU
def generate_audio_captts(transcript, caption, random_seed, fix_seed, duration, fix_duration, speed, step, cfg):
    return _generate_audio(model_list, transcript, caption, random_seed, fix_seed, duration, fix_duration, speed, step, cfg)

@spaces.GPU
def generate_audio_emocaptts(transcript, caption, random_seed, fix_seed, duration, fix_duration, speed, step, cfg):
    return _generate_audio(emomodel_list, transcript, caption, random_seed, fix_seed, duration, fix_duration, speed, step, cfg)

@spaces.GPU
def generate_audio_acccaptts(transcript, caption, random_seed, fix_seed, duration, fix_duration, speed, step, cfg):
    return _generate_audio(accmodel_list, transcript, caption, random_seed, fix_seed, duration, fix_duration, speed, step, cfg)
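
# Headless usage sketch (illustrative, not executed by the Space). It assumes
# nar.run returns a mono float numpy array sampled at 24 kHz, matching the
# handlers above; the output filename is only an example:
#
#   sr, wav = generate_audio_captts(
#       "Hello from CapSpeech.",            # transcript
#       "A calm, low-pitched male voice.",  # style caption
#       random_seed=42, fix_seed=True,
#       duration=None, fix_duration=False,
#       speed=1.0, step=32, cfg=2.0,
#   )
#   sf.write("sample.wav", wav, sr)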

# (style caption, transcript) pairs surfaced via gr.Examples in each tab
examples = [
    ["A male speaker delivers his words in a measured pace, exhibiting a high-pitched, happy, and animated tone in a clean environment.", "We just release a new text to speech project today!",],
    ["A mature male voice, rough and husky, ideal for public speaking engagements.", "You are not your job. You are not how much money you have in the bank. You are not the car you drive. You're not the contents of your wallet."],
    ["A very young feminine, very cute voice, she speaks with a slight lisp.", "Where he got his authority, I don't know, but one thing I do know, he had none from me."],
    ["With a low-pitched voice, an elderly woman delivers her speech slowly and monotonously.", "From these genres and from these spaces, you know, and the feelings of what these games can bring."],
]

examples_acc = [
    ["An Indian-accented professional woman's voice for client and public interaction.", "In big countries, such small things keep happening."],
    ["A young girl's English-accented voice, suitable for customer service and public engagement roles.", "She don't listen to U K rap, if it ain't Dave, or Cench."],
    ["Bright teenage girl's voice, with an American accent, ideal for client and public interaction.", "If we live forever, can we really be said to live?"],
    ["A mature, deep, and rough Scottish-accented female voice, with a hint of weakness, ideal for client and public interaction in customer service or community relations.", "When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow."],
]

examples_emo = [
    ["She speaks in a calm, measured tone, conveying a sense of quiet, unassuming presence.", "All those moments will be lost in time, like tears in rain."],
    ["A middle-aged woman speaks in a low voice, her words dripping with disgust and annoyance.", "Why does your car smell like a dead RAT? It's absolutely vile."],
    ["A high-pitched, animated, happy male speaker delivers crisp, enunciated words in a fast speed.", "Wait a minute! I can't believe you just built a time machine!"],
    ["A young adult female, speaking in a slow, slightly low-pitched, monotone voice, exuding a profound void and quiet sadness.", "This is doctor Shaw, last survivor of the covenant. If you’re receiving this transmission, make no attempt to come to its point of origin."],
]

# CSS: center the layout and cap its width
css = """
#col-container {
    margin: 0 auto;
    max-width: 1280px;
}
"""

# Gradio UI: one tab per checkpoint
with gr.Blocks(css=css, theme=gr.themes.Base()) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
                    ## CapSpeech: A Prompt-Guided Expressive Text-to-Speech Synthesizer
                    
                    👋 Welcome to the 🧢CapSpeech live demo.
                    
                    🔗 Learn more about this project on the [🧢CapSpeech Homepage](https://wanghelin1997.github.io/CapSpeech-demo/).
                    
                    📃 Licensed under CC BY-NC 4.0.
                    
                    **🔧 Usage Tips**
                    - Quick Start: Enter a style caption and a transcript to generate expressive speech just the way you want.
                    - Model Tabs: Switch between model checkpoints using the tabs below, each tailored to a specific downstream task.
                    - Speed/Duration Settings: Adjust the speed and duration if the predicted speech pace sounds unnatural.
                    - Flow Matching Settings: Modify the CFG scale and sampling steps to refine prompt alignment and improve generation quality.
                    """)

        with gr.Tab("Style-Captioned Text-to-Speech"):
            gr.Markdown("""This checkpoint offers balanced performance and supports general style control.""")
            # with gr.Row(equal_height=True):
            caption_input = gr.Textbox(
                label="Voice Style Caption",
                info="How should the speaker sound? Think timbre, pace, emotion, accent, etc.",
                placeholder="Describe the desired speaking style...",
                lines=2,
                value="A mature male voice, rough and husky, ideal for public speaking engagements."
                # scale=3,
            )

            transcript_input = gr.Textbox(
                label="Speech Transcript/Content",
                info="What should the speaker say?",
                placeholder="Enter speech content...",
                lines=2,
                # scale=4,
                value="You are not your job. You are not how much money you have in the bank. You are not the car you drive. You're not the contents of your wallet."
            )
                
            run_button = gr.Button("Generate", scale=1, variant="primary")

            result = gr.Audio(label="Generated Audio", type="numpy")

            with gr.Accordion("Speed/Duration Settings", open=True):
                speed_ratio = gr.Slider(minimum=0.5,
                                        maximum=2, 
                                        step=0.1, 
                                        value=1,
                                        scale=2,
                                        label="Speed", 
                                        info="Scale the duration predicted by the model.")
                audio_length = gr.Slider(minimum=1, maximum=20, step=1, value=5,
                                         label="Audio Duration",
                                         info="Manually set an audio duration.")
                fix_length = gr.Checkbox(label="Fix Audio Duration", 
                                         info="Enable to use a fixed audio duration.", 
                                         value=False)
                
            with gr.Accordion("Flow Matching Settings", open=False):
                seed = gr.Slider(minimum=0, maximum=100, step=1, value=42, label="Seed")
                fix_seed = gr.Checkbox(label="Fix Seed", 
                                       info="Enable to use a fixed random seed for reproducibility.", 
                                       value=False)
                cfg = gr.Slider(minimum=1.0, maximum=5.0, step=0.5, value=2.0, label="CFG Scale")
                step = gr.Slider(minimum=20, maximum=100, step=1, value=32, label="Flow Steps")

            gr.Examples(
                examples=examples,
                inputs=[caption_input, transcript_input]
            )

            run_button.click(
                fn=generate_audio_captts,
                inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg],
                outputs=[result]
            )

            transcript_input.submit(
                fn=generate_audio_captts,
                inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg],
                outputs=[result]
            )
            
        with gr.Tab("Accent-Captioned Text-to-Speech"):
            gr.Markdown("""This checkpoint is specifically fine-tuned for enhanced accent control.""")
            # with gr.Row(equal_height=True):
            caption_input = gr.Textbox(
                label="Voice Style Caption",
                info="How should the speaker sound? Think timbre, pace, emotion, accent, etc.",
                placeholder="Describe the desired speaking style...",
                lines=2,
                value="A young girl's English-accented voice, suitable for customer service and public engagement roles."
                # scale=3
            )

            # with gr.Row(equal_height=True):
            transcript_input = gr.Textbox(
                label="Speech Transcript/Content",
                info="What should the speaker say?",
                placeholder="Enter speech content...",
                lines=2,
                value="She don't listen to U K rap, if it ain't Dave, or Cench."
                # scale=4
            )
                
            run_button = gr.Button("Generate", scale=1, variant="primary")

            result = gr.Audio(label="Generated Audio", type="numpy")

            with gr.Accordion("Speed/Duration Settings", open=True):
                speed_ratio = gr.Slider(minimum=0.5,
                                        maximum=2, 
                                        step=0.1, 
                                        value=1,
                                        scale=2,
                                        label="Speed", 
                                        info="Scale the duration predicted by the model.")
                audio_length = gr.Slider(minimum=1, maximum=20, step=1, value=5,
                                         label="Audio Duration",
                                         info="Manually set an audio duration.")
                fix_length = gr.Checkbox(label="Fix Audio Duration", 
                                         info="Enable to use a fixed audio duration.", 
                                         value=False)
                
            with gr.Accordion("Flow Matching Settings", open=False):
                seed = gr.Slider(minimum=0, maximum=100, step=1, value=42, label="Seed")
                fix_seed = gr.Checkbox(label="Fix Seed", 
                                       info="Enable to use a fixed random seed for reproducibility.", 
                                       value=False)
                cfg = gr.Slider(minimum=1.0, maximum=5.0, step=0.5, value=2.0, label="CFG Scale")
                step = gr.Slider(minimum=20, maximum=100, step=1, value=32, label="Flow Steps")

            gr.Examples(
                examples=examples_acc,
                inputs=[caption_input, transcript_input]
            )

            run_button.click(
                fn=generate_audio_acccaptts,
                inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg],
                outputs=[result]
            )

            transcript_input.submit(
                fn=generate_audio_acccaptts,
                inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg],
                outputs=[result]
            )
            
        with gr.Tab("Emotion-Captioned Text-to-Speech"):
            gr.Markdown("""This checkpoint is specifically fine-tuned to enhance emotional expressiveness.""")
            # with gr.Row(equal_height=True):
            caption_input = gr.Textbox(
                label="Voice Style Caption",
                info="How should the speaker sound? Think timbre, pace, emotion, accent, etc.",
                placeholder="Describe the desired speaking style...",
                lines=2,
                value="A middle-aged woman speaks in a low voice, her words dripping with disgust and annoyance."
                # scale=3
            )

            # with gr.Row(equal_height=True):
            transcript_input = gr.Textbox(
                label="Speech Transcript/Content",
                info="What should the speaker say?",
                placeholder="Enter speech content...",
                lines=2,
                value="Why does your car smell like a dead RAT? It's absolutely vile."
                # scale=4
            )
                
            run_button = gr.Button("Generate", scale=1, variant="primary")

            result = gr.Audio(label="Generated Audio", type="numpy")

            with gr.Accordion("Speed/Duration Settings", open=True):
                speed_ratio = gr.Slider(minimum=0.5,
                                        maximum=2, 
                                        step=0.1, 
                                        value=1,
                                        scale=2,
                                        label="Speed", 
                                        info="Scale the duration predicted by the model.")
                audio_length = gr.Slider(minimum=1, maximum=20, step=1, value=5,
                                         label="Audio Duration",
                                         info="Manually set an audio duration.")
                fix_length = gr.Checkbox(label="Fix Audio Duration", 
                                         info="Enable to use a fixed audio duration.", 
                                         value=False)
                
            with gr.Accordion("Flow Matching Settings", open=False):
                seed = gr.Slider(minimum=0, maximum=100, step=1, value=42, label="Seed")
                fix_seed = gr.Checkbox(label="Fix Seed", 
                                       info="Enable to use a fixed random seed for reproducibility.", 
                                       value=False)
                cfg = gr.Slider(minimum=1.0, maximum=5.0, step=0.5, value=2.0, label="CFG Scale")
                step = gr.Slider(minimum=20, maximum=100, step=1, value=32, label="Flow Steps")

            gr.Examples(
                examples=examples_emo,
                inputs=[caption_input, transcript_input]
            )

            run_button.click(
                fn=generate_audio_emocaptts,
                inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg],
                outputs=[result]
            )

            transcript_input.submit(
                fn=generate_audio_emocaptts,
                inputs=[transcript_input, caption_input, seed, fix_seed, audio_length, fix_length, speed_ratio, step, cfg],
                outputs=[result]
            )

demo.launch()