import os

# Must be set before importing TTS, whose dependencies use numba.
os.environ["NUMBA_DISABLE_CACHE"] = "1"
import gradio as gr
from docx import Document
from TTS.api import TTS
import tempfile
import zipfile
from io import BytesIO
import re
from pydub import AudioSegment
# Voice model
VOICE_MODEL = "tts_models/en/vctk/vits"

# Embedded VCTK speaker metadata (speaker ID -> age, gender, accent)
SPEAKER_METADATA = {
300: { "age": 23, "gender": "F", "accent": "American"},
271: { "age": 19, "gender": "M", "accent": "Scottish"},
287: { "age": 23, "gender": "M", "accent": "English"},
262: { "age": 23, "gender": "F", "accent": "Scottish"},
284: { "age": 20, "gender": "M", "accent": "Scottish"},
297: { "age": 20, "gender": "F", "accent": "American"},
227: { "age": 38, "gender": "M", "accent": "English"},
246: { "age": 22, "gender": "M", "accent": "Scottish"},
225: { "age": 23, "gender": "F", "accent": "English"},
259: { "age": 23, "gender": "M", "accent": "English"},
252: { "age": 22, "gender": "M", "accent": "Scottish"},
231: { "age": 23, "gender": "F", "accent": "English"},
266: { "age": 22, "gender": "F", "accent": "Irish"},
241: { "age": 21, "gender": "M", "accent": "Scottish"},
312: { "age": 19, "gender": "F", "accent": "Canadian"},
329: { "age": 23, "gender": "F", "accent": "American"},
232: { "age": 23, "gender": "M", "accent": "English"},
305: { "age": 19, "gender": "F", "accent": "American"},
311: { "age": 21, "gender": "M", "accent": "American"},
301: { "age": 23, "gender": "F", "accent": "American"},
304: { "age": 22, "gender": "M", "accent": "NorthernIrish"},
310: { "age": 21, "gender": "F", "accent": "American"},
260: { "age": 21, "gender": "M", "accent": "Scottish"},
315: { "age": 18, "gender": "M", "accent": "American"},
374: { "age": 28, "gender": "M", "accent": "Australian"},
364: { "age": 23, "gender": "M", "accent": "Irish"},
269: { "age": 20, "gender": "F", "accent": "English"},
345: { "age": 22, "gender": "M", "accent": "American"},
326: { "age": 26, "gender": "M", "accent": "Australian"},
343: { "age": 27, "gender": "F", "accent": "Canadian"},
230: { "age": 22, "gender": "F", "accent": "English"},
376: { "age": 22, "gender": "M", "accent": "Indian"},
240: { "age": 21, "gender": "F", "accent": "English"},
298: { "age": 19, "gender": "M", "accent": "Irish"},
272: { "age": 23, "gender": "M", "accent": "Scottish"},
248: { "age": 23, "gender": "F", "accent": "Indian"},
264: { "age": 23, "gender": "F", "accent": "Scottish"},
250: { "age": 22, "gender": "F", "accent": "English"},
292: { "age": 23, "gender": "M", "accent": "NorthernIrish"},
237: { "age": 22, "gender": "M", "accent": "Scottish"},
363: { "age": 22, "gender": "M", "accent": "Canadian"},
313: { "age": 24, "gender": "F", "accent": "Irish"},
285: { "age": 21, "gender": "M", "accent": "Scottish"},
268: { "age": 23, "gender": "F", "accent": "English"},
302: { "age": 20, "gender": "M", "accent": "Canadian"},
261: { "age": 26, "gender": "F", "accent": "NorthernIrish"},
336: { "age": 18, "gender": "F", "accent": "SouthAfrican"},
288: { "age": 22, "gender": "F", "accent": "Irish"},
226: { "age": 22, "gender": "M", "accent": "English"},
277: { "age": 23, "gender": "F", "accent": "English"},
360: { "age": 19, "gender": "M", "accent": "American"},
257: { "age": 24, "gender": "F", "accent": "English"},
254: { "age": 21, "gender": "M", "accent": "English"},
339: { "age": 21, "gender": "F", "accent": "American"},
323: { "age": 19, "gender": "F", "accent": "SouthAfrican"},
255: { "age": 19, "gender": "M", "accent": "Scottish"},
249: { "age": 22, "gender": "F", "accent": "Scottish"},
293: { "age": 22, "gender": "F", "accent": "NorthernIrish"},
244: { "age": 22, "gender": "F", "accent": "English"},
245: { "age": 25, "gender": "M", "accent": "Irish"},
361: { "age": 19, "gender": "F", "accent": "American"},
314: { "age": 26, "gender": "F", "accent": "SouthAfrican"},
308: { "age": 18, "gender": "F", "accent": "American"},
229: { "age": 23, "gender": "F", "accent": "English"},
341: { "age": 26, "gender": "F", "accent": "American"},
275: { "age": 23, "gender": "M", "accent": "Scottish"},
263: { "age": 22, "gender": "M", "accent": "Scottish"},
253: { "age": 22, "gender": "F", "accent": "Welsh"},
299: { "age": 25, "gender": "F", "accent": "American"},
316: { "age": 20, "gender": "M", "accent": "Canadian"},
282: { "age": 23, "gender": "F", "accent": "English"},
362: { "age": 29, "gender": "F", "accent": "American"},
294: { "age": 33, "gender": "F", "accent": "American"},
274: { "age": 22, "gender": "M", "accent": "English"},
279: { "age": 23, "gender": "M", "accent": "English"},
281: { "age": 29, "gender": "M", "accent": "Scottish"},
286: { "age": 23, "gender": "M", "accent": "English"},
258: { "age": 22, "gender": "M", "accent": "English"},
247: { "age": 22, "gender": "M", "accent": "Scottish"},
351: { "age": 21, "gender": "F", "accent": "NorthernIrish"},
283: { "age": 24, "gender": "F", "accent": "Irish"},
334: { "age": 18, "gender": "M", "accent": "American"},
333: { "age": 19, "gender": "F", "accent": "American"},
295: { "age": 23, "gender": "F", "accent": "Irish"},
330: { "age": 26, "gender": "F", "accent": "American"},
335: { "age": 25, "gender": "F", "accent": "NewZealand"},
228: { "age": 22, "gender": "F", "accent": "English"},
267: { "age": 23, "gender": "F", "accent": "English"},
273: { "age": 18, "gender": "F", "accent": "English"}
}
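# The keys above are VCTK speaker numbers; the VITS model addresses speakers as
# "p<ID>" (e.g. "p300"), so the generation functions prepend that prefix before
# calling tts_to_file.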
def clean_text(text):
    # Remove hyperlinks before synthesis
    return re.sub(r'http[s]?://\S+', '', text)


def extract_paragraphs_from_docx(docx_file):
    # Pull the non-empty paragraphs out of the uploaded DOCX file
    document = Document(docx_file.name)
    paragraphs = [p.text.strip() for p in document.paragraphs if p.text.strip()]
    return [clean_text(p) for p in paragraphs]


def list_speaker_choices():
    # Dropdown labels in the form "300 | F | American"
    return [f"{sid} | {meta['gender']} | {meta['accent']}" for sid, meta in SPEAKER_METADATA.items()]


def get_speaker_id_from_label(label):
    # Recover the numeric speaker ID from the dropdown label
    return label.split('|')[0].strip()
def generate_sample_audio(sample_text, speaker_label):
    if len(sample_text) > 500:
        raise gr.Error("Sample text exceeds 500 characters.")
    speaker_id = get_speaker_id_from_label(speaker_label)
    model = TTS(VOICE_MODEL)
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_wav:
        model.tts_to_file(text=sample_text, speaker="p" + speaker_id, file_path=tmp_wav.name)
    return tmp_wav.name
def generate_audio(docx_file, speaker_label):
    speaker_id = get_speaker_id_from_label(speaker_label)
    model = TTS(VOICE_MODEL)
    paragraphs = extract_paragraphs_from_docx(docx_file)
    combined_audio = AudioSegment.empty()
    temp_files = []
    try:
        # Synthesize each paragraph to its own WAV and append it to the running mix
        for idx, para in enumerate(paragraphs):
            tmp = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
            model.tts_to_file(text=para, speaker="p" + speaker_id, file_path=tmp.name)
            audio_chunk = AudioSegment.from_wav(tmp.name)
            combined_audio += audio_chunk
            temp_files.append(tmp.name)
            tmp.close()
    except Exception as e:
        # If synthesis fails partway through, still export whatever was generated
        print("Generation interrupted. Saving partial output.", e)

    output_dir = tempfile.mkdtemp()
    final_output_path = os.path.join(output_dir, "final_output.wav")
    combined_audio.export(final_output_path, format="wav")

    zip_path = os.path.join(output_dir, "output.zip")
    with zipfile.ZipFile(zip_path, 'w') as zipf:
        zipf.write(final_output_path, arcname="final_output.wav")

    # Remove per-paragraph temp files once the combined audio has been written
    for f in temp_files:
        os.remove(f)

    return zip_path
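# Note: both generation functions build a fresh TTS(VOICE_MODEL) instance per
# request. A possible optimization (a sketch, not part of the original app) is to
# load the model once at module level and reuse it, e.g.:
#
#     MODEL = TTS(VOICE_MODEL)  # hypothetical shared instance
#     MODEL.tts_to_file(text=para, speaker="p" + speaker_id, file_path=tmp.name)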
# --- UI ---
speaker_choices = list_speaker_choices()

with gr.Blocks() as demo:
    gr.Markdown("## 📄 TTS Voice Generator with Paragraph-Wise Processing")

    with gr.Row():
        speaker_dropdown = gr.Dropdown(label="Select Voice", choices=speaker_choices)

    with gr.Row():
        sample_textbox = gr.Textbox(label="Enter Sample Text (Max 500 characters)", max_lines=5)
        sample_button = gr.Button("Generate Sample")
        clear_button = gr.Button("Clear Sample")

    sample_audio = gr.Audio(label="Sample Output", type="filepath")
    sample_button.click(fn=generate_sample_audio, inputs=[sample_textbox, speaker_dropdown], outputs=[sample_audio])
    clear_button.click(fn=lambda: None, inputs=[], outputs=[sample_audio])

    with gr.Row():
        docx_input = gr.File(label="Upload DOCX File", file_types=[".docx"])
        generate_button = gr.Button("Generate Full Audio")

    download_output = gr.File(label="Download Output Zip")
    generate_button.click(fn=generate_audio, inputs=[docx_input, speaker_dropdown], outputs=[download_output])

if __name__ == "__main__":
    demo.launch()