# nx_noise/streamlit/nx_noise_app.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import asyncio
from enum import Enum
from io import BytesIO
import os
from pathlib import Path
import shutil
import sys
from typing import List
import uuid
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../"))
import edge_tts
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from streamlit_shortcuts import shortcut_button
from project_settings import project_path, temp_dir
# ENTRYPOINT ["streamlit", "run", "streamlit/nx_noise_app.py", "--server.port=8501", "--server.address=0.0.0.0"]
# streamlit run streamlit/nx_noise_app.py --server.port=8501 --server.address=0.0.0.0
def get_args():
    """Parse CLI options: source dir of unlabeled wavs and target dir for labeled ones."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--src_dir",
        type=str,
        default=(project_path / "data/speech/en-PH/2025-01-14/2025-01-14").as_posix(),
    )
    parser.add_argument(
        "--tgt_dir",
        type=str,
        default=(project_path / "data/speech/en-PH/2025-01-14/2025-01-14/finished").as_posix(),
    )
    return parser.parse_args()
class Labels(Enum):
    """Annotation categories a reviewed clip can be filed under."""

    noise = "noise"    # pure noise clip
    noisy = "noisy"    # speech with noise mixed in
async def edge_tts_text_to_speech(text: str, speaker: str = "zh-CN-XiaoxiaoNeural"):
    """Synthesize *text* with edge-tts using *speaker* and return the saved wav path (posix str)."""
    # Unique filename under temp_dir so concurrent calls never collide.
    out_path = (temp_dir / f"{uuid.uuid4()}.wav").as_posix()
    await edge_tts.Communicate(text, speaker).save(out_path)
    return out_path
def generate_spectrogram(filename: str, title: str = "Spectrogram"):
    """Render a dB-scaled STFT magnitude spectrogram of an audio file as a PNG.

    :param filename: path to an audio file readable by librosa (str or Path).
    :param title: title drawn above the plot.
    :return: BytesIO positioned at offset 0, containing the PNG image.
    """
    # sr=None keeps the file's native sample rate instead of resampling.
    signal, sample_rate = librosa.load(filename, sr=None)
    mag = np.abs(librosa.stft(signal))
    # mag_db = librosa.amplitude_to_db(mag, ref=np.max)
    # NOTE(review): fixed ref=20 (instead of the usual ref=np.max) presumably
    # keeps the color scale comparable across clips — confirm.
    mag_db = librosa.amplitude_to_db(mag, ref=20)

    # BUGFIX: hold the figure explicitly and close it in a finally block.
    # Previously an exception in specshow/savefig leaked the figure, and
    # plt.close() only closed whichever figure happened to be "current" —
    # a steady leak under Streamlit's rerun-on-every-interaction model.
    fig = plt.figure(figsize=(10, 4))
    try:
        librosa.display.specshow(mag_db, sr=sample_rate)
        plt.title(title)
        buf = BytesIO()
        plt.savefig(buf, format="png", bbox_inches="tight")
    finally:
        plt.close(fig)
    buf.seek(0)
    return buf
def when_click_annotation_button(filename: Path, label: str, tgt_dir: Path):
    """Button callback: file the clip under tgt_dir/<label>/, creating the folder if needed."""
    destination = tgt_dir / label
    destination.mkdir(parents=True, exist_ok=True)
    shutil.move(filename.as_posix(), destination)
def main():
    """Streamlit annotation UI.

    Plays the first unlabeled wav under ``--src_dir``, shows its spectrogram,
    and moves it into ``--tgt_dir/<label>/`` when a label button (or its
    keyboard shortcut 1/2) is pressed; the rerun then serves the next file.
    """
    args = get_args()

    src_dir = Path(args.src_dir)
    tgt_dir = Path(args.tgt_dir)

    # @st.cache_data
    # def get_shortcut_audio():
    #     result = {
    #         Labels.noise.value: asyncio.run(edge_tts_text_to_speech("噪音")),
    #         Labels.noisy.value: asyncio.run(edge_tts_text_to_speech("加噪语音")),
    #     }
    #     return result
    #
    # shortcut_audio = get_shortcut_audio()

    # Collect the remaining unlabeled audio files (recursive); stop when done.
    audio_files: List[Path] = [filename for filename in src_dir.glob("**/*.wav")]
    if len(audio_files) == 0:
        st.error("没有未标注的音频了。")
        st.stop()
    audio_file: Path = audio_files[0]

    # session_state
    # NOTE(review): "play_audio" is initialized but never read anywhere below —
    # presumably left over from an earlier feature; confirm before removing.
    if "play_audio" not in st.session_state:
        st.session_state.play_audio = False

    # ui
    st.title("🔊 音频文件浏览器")

    column1, column2 = st.columns([4, 1])

    with column1:
        # BUGFIX: st.audio's `format` expects a MIME type such as "audio/wav";
        # the previous code passed the bare file suffix ".wav".
        st.audio(audio_file, format=f"audio/{audio_file.suffix.lstrip('.')}", autoplay=True)
        with st.spinner("生成频谱图中..."):
            spectrogram = generate_spectrogram(audio_file)
        st.image(spectrogram, use_container_width=True)

    with column2:
        # One button per label; keyboard shortcuts "1"/"2" follow enum order.
        for index, label in enumerate([Labels.noise, Labels.noisy], start=1):
            shortcut_button(
                label=label.value,
                shortcut=str(index),
                on_click=when_click_annotation_button,
                kwargs={
                    "filename": audio_file,
                    "label": label.value,
                    "tgt_dir": tgt_dir,
                },
                type="primary",
            )
    return
# Script entry point (also reached when run via `streamlit run`).
if __name__ == "__main__":
    main()