import shlex
import subprocess
import spaces
import torch
import gradio as gr
# install packages for mamba
def install_mamba():
    #subprocess.run(shlex.split("pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cu118"))
    #subprocess.run(shlex.split("pip install https://github.com/Dao-AILab/causal-conv1d/releases/download/v1.4.0/causal_conv1d-1.4.0+cu122torch2.3cxx11abiFALSE-cp310-cp310-linux_x86_64.whl"))
    subprocess.run(shlex.split("pip install https://github.com/state-spaces/mamba/releases/download/v2.2.2/mamba_ssm-2.2.2+cu122torch2.3cxx11abiFALSE-cp310-cp310-linux_x86_64.whl"))
    #subprocess.run(shlex.split("pip install numpy==1.26.4"))
install_mamba()
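# Note: the prebuilt mamba_ssm wheel above targets CUDA 12.2 / torch 2.3 / Python 3.10
# (per its filename tag), so the Space does not have to compile the CUDA kernels at startup.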
ABOUT = """
# SEMamba: Speech Enhancement
A Mamba-based model that denoises real-world audio.
Upload or record a noisy clip and click **Enhance** to hear + see its spectrogram.
"""
import numpy as np
import soundfile as sf
import torch
import yaml
import librosa
import librosa.display
import matplotlib
import matplotlib.pyplot as plt
from models.stfts import mag_phase_stft, mag_phase_istft
from models.generator import SEMamba
from models.pcs400 import cal_pcs
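# Note: cal_pcs (PCS400) is imported but not applied in enhance() below; presumably it is
# available as an optional perceptual-contrast-stretching post-processing step.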
ckpt = "ckpts/SEMamba_advanced.pth"
cfg_f = "recipes/SEMamba_advanced.yaml"
# load config
with open(cfg_f, 'r') as f:
    cfg = yaml.safe_load(f)
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = "cuda"
model = SEMamba(cfg).to(device)
sdict = torch.load(ckpt, map_location=device)
model.load_state_dict(sdict["generator"])
model.eval()
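# The checkpoint is a dict that stores the generator weights under "generator" (it may also
# hold other training state); eval() switches dropout/normalization layers to inference mode.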
@spaces.GPU
def enhance(filepath):
    with torch.no_grad():
        # load & (if needed) resample to the model's 16 kHz sample rate
        wav, orig_sr = librosa.load(filepath, sr=None)
        if orig_sr != 16000:
            wav = librosa.resample(wav, orig_sr=orig_sr, target_sr=16000)
        # normalize → tensor
        x = torch.from_numpy(wav).float().to(device)
        norm = torch.sqrt(len(x) / torch.sum(x ** 2))
        x = (x * norm).unsqueeze(0)
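        # The STFT arguments below are assumed to be n_fft=400, hop_size=100, win_size=400 and a
        # magnitude compression factor of 0.3, i.e. a 25 ms window with a 6.25 ms hop at 16 kHz,
        # presumably matching the training recipe.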
        # STFT → model → ISTFT
        amp, pha, _ = mag_phase_stft(x, 400, 100, 400, 0.3)
        amp2, pha2, comp = model(amp, pha)
        out = mag_phase_istft(amp2, pha2, 400, 100, 400, 0.3)
        out = (out / norm).squeeze().cpu().numpy()
        # back to the original sample rate
        if orig_sr != 16000:
            out = librosa.resample(out, orig_sr=16000, target_sr=orig_sr)
        # write file as 16-bit PCM
        sf.write("enhanced.wav", out, orig_sr, subtype="PCM_16")
        # build spectrogram
        D = librosa.stft(out, n_fft=1024, hop_length=512)
        S = librosa.amplitude_to_db(np.abs(D), ref=np.max)
        fig, ax = plt.subplots(figsize=(6, 3))
        img = librosa.display.specshow(S, sr=orig_sr, hop_length=512, x_axis="time", y_axis="hz", ax=ax)
        ax.set_title("Enhanced Spectrogram")
        fig.colorbar(img, ax=ax, format="%+2.0f dB")
        return "enhanced.wav"  #, fig
with gr.Blocks() as demo:
    gr.Markdown(ABOUT)
    input_audio = gr.Audio(label="Input Audio", type="filepath")
    enhance_btn = gr.Button("Enhance")
    output_audio = gr.Audio(label="Enhanced Audio", type="filepath")
    enhance_btn.click(fn=enhance, inputs=input_audio, outputs=output_audio)
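    # Optional sketch: to also display the spectrogram figure built in enhance(), one could
    # return it as a second value ("return 'enhanced.wav', fig") and wire it to a plot
    # component (the component name below is illustrative):
    #   output_plot = gr.Plot(label="Enhanced Spectrogram")
    #   enhance_btn.click(fn=enhance, inputs=input_audio, outputs=[output_audio, output_plot])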
demo.queue().launch()