# -*- coding: utf-8 -*-
"""Gradio TTS demo.

Two synthesis back-ends share one dropdown:

* an entry from ``language_names``  -> facebook/mms-tts VITS model (16 kHz);
* any other entry (a voice name)   -> StyleTTS2 with ``wav/<voice>.wav``
  as the style reference (24 kHz).

The synthesized signal is written to ``_vits_.wav`` as pseudo-stereo
(slightly different gain per channel) and the path is returned to the
``gr.Audio`` output.
"""
import textwrap

import gradio as gr
import numpy as np
import torch
import torch.nn as nn
import audiofile

from tts import StyleTTS2
from textual import only_greek_or_only_latin, transliterate_number, fix_vocals
from audionar import VitsModel, VitsTokenizer

language_names = ['Ancient greek',
                  'English',
                  'Deutsch',
                  'French',
                  'Hungarian',
                  'Romanian',
                  'Serbian (Approx.)']

# Dropdown label (lower-cased) -> ISO code of the facebook/mms-tts-* checkpoint.
# https://huggingface.co/dkounadis/artificial-styletts2/blob/main/msinference.py
_LANG_MAP = {
    'ancient greek': 'grc',
    'english': 'eng',
    'deutsch': 'deu',
    'french': 'fra',
    'hungarian': 'hun',
    'romanian': 'ron',
    'serbian (approx.)': 'rmc-script_latin',
}

# Lazy single-entry cache of the most recently used MMS-VITS checkpoint, so
# switching sentences in the same language does not reload the model.
_vits_cache = {'code': None, 'model': None, 'tokenizer': None}


def _get_vits(lang_code):
    """Return ``(model, tokenizer)`` for *lang_code*, (re)loading on change."""
    if _vits_cache['code'] != lang_code:
        _vits_cache['model'] = VitsModel.from_pretrained(
            f'facebook/mms-tts-{lang_code}').eval()
        _vits_cache['tokenizer'] = VitsTokenizer.from_pretrained(
            f'facebook/mms-tts-{lang_code}')
        _vits_cache['code'] = lang_code
    return _vits_cache['model'], _vits_cache['tokenizer']


def audionar_tts(text=None, lang='Romanian'):
    """Synthesize *text* and return the path of the written wav file.

    Parameters
    ----------
    text : str or None
        Text to speak; empty/None input falls back to a placeholder sentence.
    lang : str
        Either one of ``language_names`` (VITS path) or a StyleTTS2 voice
        name matching a reference file ``wav/<lang>.wav``.

    Returns
    -------
    str
        Path to the generated ``_vits_.wav`` (2-channel pseudo-stereo).
    """
    if text is None or text.strip() == '':
        text = 'No Txt Has been typed'

    if lang not in language_names:
        # StyleTTS2: clone the voice whose reference wav carries its name.
        fs = 24000
        text = only_greek_or_only_latin(text, lang='eng')
        x = _tts.inference(text, ref_s='wav/' + lang + '.wav')[0, 0, :].numpy()  # 24 kHz
        # BUGFIX: add the channel axis here as well; without it the stereo
        # weighting below concatenated the mono signal in TIME (doubling its
        # length) instead of stacking two channels.
        x = x[None, :]
    else:
        # MMS-VITS for the named language.
        fs = 16000
        lang_code = _LANG_MAP.get(lang.lower(), lang.lower().split()[0].strip())
        net_g, tokenizer = _get_vits(lang_code)

        text = only_greek_or_only_latin(text, lang=lang_code)
        text = transliterate_number(text, lang=lang_code)
        text = fix_vocals(text, lang=lang_code)

        # Chunk long input so each forward pass stays bounded.
        parts = []
        for sentence in textwrap.wrap(text, width=439):
            inputs = tokenizer(sentence, return_tensors="pt")
            with torch.no_grad():
                parts.append(net_g(
                    input_ids=inputs.input_ids,
                    attention_mask=inputs.attention_mask,
                    lang_code=lang_code,
                )[0, :])
        x = torch.cat(parts).cpu().numpy()[None, :]

    # Slightly asymmetric L/R gains -> (2, samples) pseudo-stereo.
    x = np.concatenate([0.49 * x, 0.51 * x], 0)
    wavfile = '_vits_.wav'
    audiofile.write(wavfile, x, fs)
    return wavfile


# StyleTTS2 reference voices; each name maps to a file ``wav/<name>.wav``.
VOICES = [
    'jv_ID_google-gmu_04982.wav',
    'en_US_vctk_p303.wav',
    'en_US_vctk_p306.wav',
    'en_US_vctk_p318.wav',
    'en_US_vctk_p269.wav',
    'en_US_vctk_p316.wav',
    'en_US_vctk_p362.wav',
    'fr_FR_tom.wav',
    'bn_multi_5958.wav',
    'en_US_vctk_p287.wav',
    'en_US_vctk_p260.wav',
    'en_US_cmu_arctic_fem.wav',
    'en_US_cmu_arctic_rms.wav',
    'fr_FR_m-ailabs_nadine_eckert_boulet.wav',
    'en_US_vctk_p237.wav',
    'en_US_vctk_p317.wav',
    'tn_ZA_google-nwu_0378.wav',
    'nl_pmk.wav',
    'tn_ZA_google-nwu_3342.wav',
    'ne_NP_ne-google_3997.wav',
    'tn_ZA_google-nwu_8914.wav',
    'en_US_vctk_p238.wav',
    'en_US_vctk_p275.wav',
    'af_ZA_google-nwu_0184.wav',
    'af_ZA_google-nwu_8148.wav',
    'en_US_vctk_p326.wav',
    'en_US_vctk_p264.wav',
    'en_US_vctk_p295.wav',
    'en_US_vctk_p294.wav',
    'en_US_vctk_p330.wav',
    'gu_IN_cmu-indic_cmu_indic_guj_ad.wav',
    'jv_ID_google-gmu_05219.wav',
    'en_US_vctk_p284.wav',
    'en_US_m-ailabs_mary_ann.wav',
    'bn_multi_01701.wav',
    'en_US_vctk_p262.wav',
    'en_US_vctk_p243.wav',
    'en_US_vctk_p278.wav',
    'en_US_vctk_p250.wav',
    'nl_femal.wav',
    'en_US_vctk_p228.wav',
    'ne_NP_ne-google_0649.wav',
    'en_US_cmu_arctic_gka.wav',
    'en_US_vctk_p361.wav',
    'jv_ID_google-gmu_02326.wav',
    'tn_ZA_google-nwu_1932.wav',
    'de_DE_thorsten-emotion_amused.wav',
    'jv_ID_google-gmu_08002.wav',
    'tn_ZA_google-nwu_3629.wav',
    'en_US_vctk_p230.wav',
    'af_ZA_google-nwu_7214.wav',
    'nl_nathalie.wav',
    'en_US_cmu_arctic_lnh.wav',
    'tn_ZA_google-nwu_6459.wav',
    'tn_ZA_google-nwu_6206.wav',
    'en_US_vctk_p323.wav',
    'en_US_m-ailabs_judy_bieber.wav',
    'en_US_vctk_p261.wav',
    'fa_haaniye.wav',
    # 'en_US_vctk_p339.wav',  # intentionally excluded
    'tn_ZA_google-nwu_7896.wav',
    'en_US_vctk_p258.wav',
    'tn_ZA_google-nwu_7674.wav',
    'en_US_hifi-tts_6097.wav',
    'en_US_vctk_p304.wav',
    'en_US_vctk_p307.wav',
    'fr_FR_m-ailabs_bernard.wav',
    'en_US_cmu_arctic_jmk.wav',
    'ne_NP_ne-google_0283.wav',
    'en_US_vctk_p246.wav',
    'en_US_vctk_p276.wav',
    'style_o22050.wav',
    'en_US_vctk_s5.wav',
    'en_US_vctk_p268.wav',
    'af_ZA_google-nwu_8924.wav',
    'en_US_vctk_p363.wav',
    # 'it_IT_mls_644.wav',  # intentionally excluded
    'ne_NP_ne-google_3614.wav',
    'ne_NP_ne-google_3154.wav',
    'en_US_cmu_arctic_eey.wav',
    'tn_ZA_google-nwu_2839.wav',
    'af_ZA_google-nwu_7130.wav',
    'ne_NP_ne-google_2139.wav',
    'jv_ID_google-gmu_04715.wav',
    'en_US_vctk_p273.wav',
]
VOICES = [t[:-4] for t in VOICES]  # crop .wav for visuals in gr.Dropdown

_tts = StyleTTS2().to('cpu')

with gr.Blocks(theme='huggingface') as demo:
    with gr.Column():
        text_input = gr.Textbox(
            label="Type text for TTS:",
            placeholder="Type Text for TTS",
            lines=4,
            value='Η γρηγορη καφετι αλεπου πειδαει πανω απο τον τεμπελη σκυλο.')
        choice_dropdown = gr.Dropdown(
            choices=language_names + VOICES,
            label="Vox",
            value=language_names[0])
        generate_button = gr.Button("Generate Audio", variant="primary")
        output_audio = gr.Audio(label="TTS Output")
        generate_button.click(
            fn=audionar_tts,
            inputs=[text_input, choice_dropdown],
            outputs=[output_audio])

demo.launch(debug=True)