|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from __future__ import print_function |
|
|
|
import argparse |
|
import logging |
|
logging.getLogger('matplotlib').setLevel(logging.WARNING) |
|
import os |
|
import sys |
|
import torch |
|
ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) |
|
sys.path.append('{}/../..'.format(ROOT_DIR)) |
|
sys.path.append('{}/../../third_party/Matcha-TTS'.format(ROOT_DIR)) |
|
from cosyvoice.cli.cosyvoice import CosyVoice |
|
|
|
|
|
def get_args():
    """Parse command-line options for the export script.

    Returns:
        argparse.Namespace with a single attribute ``model_dir`` (local
        path of the pretrained model directory to export).
    """
    parser = argparse.ArgumentParser(description='export your model for deployment')
    parser.add_argument('--model_dir',
                        type=str,
                        default='pretrained_models/CosyVoice-300M',
                        help='local path')
    parsed = parser.parse_args()
    # Echo the parsed options so export runs are self-documenting in logs.
    print(parsed)
    return parsed
|
|
|
|
|
def _export_jit(module, output_path, preserved_attrs=None):
    """Script, freeze, optimize and save a torch module as TorchScript.

    Args:
        module: the torch.nn.Module to export (already moved to the
            desired dtype by the caller).
        output_path: file path for the serialized TorchScript archive.
        preserved_attrs: optional list of attribute/method names to keep
            through freezing (e.g. extra entry points besides ``forward``).
    """
    script = torch.jit.script(module)
    if preserved_attrs is not None:
        script = torch.jit.freeze(script, preserved_attrs=preserved_attrs)
    else:
        script = torch.jit.freeze(script)
    script = torch.jit.optimize_for_inference(script)
    script.save(output_path)


def main():
    """Export CosyVoice submodules as optimized TorchScript archives.

    Loads the model from ``--model_dir`` and writes three files next to it:
    llm.text_encoder.fp16.zip, llm.llm.fp16.zip and flow.encoder.fp32.zip.
    """
    args = get_args()
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')

    # Use a static fusion strategy and disable the profiling executor so
    # the exported graphs are stable and do not depend on runtime profiling.
    torch._C._jit_set_fusion_strategy([('STATIC', 1)])
    torch._C._jit_set_profiling_mode(False)
    torch._C._jit_set_profiling_executor(False)

    # Load eagerly (no pre-built jit/onnx artifacts) so we export from source.
    cosyvoice = CosyVoice(args.model_dir, load_jit=False, load_onnx=False)

    # 1. llm text_encoder in fp16.
    _export_jit(cosyvoice.model.llm.text_encoder.half(),
                '{}/llm.text_encoder.fp16.zip'.format(args.model_dir))

    # 2. llm llm in fp16; keep 'forward_chunk' (streaming entry point)
    # alive through freezing, which would otherwise strip it.
    _export_jit(cosyvoice.model.llm.llm.half(),
                '{}/llm.llm.fp16.zip'.format(args.model_dir),
                preserved_attrs=['forward_chunk'])

    # 3. flow encoder kept in fp32.
    _export_jit(cosyvoice.model.flow.encoder,
                '{}/flow.encoder.fp32.zip'.format(args.model_dir))
|
|
|
|
|
# Run the export only when invoked as a script, not when imported.
if __name__ == '__main__':

    main()
|
|