import os

import numpy as np
import torch
import torchaudio
from scipy.io import wavfile

import torchcrepe


def audio(filename):
    """Load audio from disk"""
    return torchaudio.load(filename)
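
# Example usage (a minimal sketch; 'speech.wav' is a hypothetical path):
# torchaudio.load returns a (waveform, sample_rate) tuple, where the waveform
# is a float tensor of shape (channels, samples).
#
#     waveform, sample_rate = audio('speech.wav')
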

def model(device, capacity='full'):
    """Preloads model from disk"""
    # Bind model and capacity
    torchcrepe.infer.capacity = capacity
    torchcrepe.infer.model = torchcrepe.Crepe(capacity)

    # Load weights
    file = os.path.join(os.path.dirname(__file__), 'assets', f'{capacity}.pth')
    torchcrepe.infer.model.load_state_dict(
        torch.load(file, map_location=device))

    # Place on device
    torchcrepe.infer.model = torchcrepe.infer.model.to(torch.device(device))

    # Eval mode
    torchcrepe.infer.model.eval()
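

# Example usage (a minimal sketch): preload the pretrained weights onto a
# device before running inference. The 'cpu' device and 'tiny' capacity below
# are illustrative choices, assuming a 'tiny.pth' checkpoint ships in assets.
if __name__ == '__main__':
    model('cpu', capacity='tiny')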