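"""
Streamlit annotation tool: plays each .wav file under --src_dir, shows its
spectrogram, and moves it into a per-label sub-folder of --tgt_dir when one of
the "noise" / "noisy" shortcut buttons is pressed.

Illustrative invocation (the script name is an assumption, not taken from this
file):

    streamlit run annotate_noise.py -- --src_dir <wav_dir> --tgt_dir <out_dir>
"""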
import argparse
import asyncio
from enum import Enum
from io import BytesIO
import os
from pathlib import Path
import shutil
import sys
from typing import List
import uuid

pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../"))

import edge_tts
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np

import streamlit as st
from streamlit_shortcuts import shortcut_button

from project_settings import project_path, temp_dir


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--src_dir",
        default=(project_path / "data/speech/en-PH/2025-01-14/2025-01-14").as_posix(),
        type=str
    )
    parser.add_argument(
        "--tgt_dir",
        default=(project_path / "data/speech/en-PH/2025-01-14/2025-01-14/finished").as_posix(),
        type=str
    )
    args = parser.parse_args()
    return args


class Labels(Enum):
    noise = "noise"
    noisy = "noisy"


async def edge_tts_text_to_speech(text: str, speaker: str = "zh-CN-XiaoxiaoNeural"):
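    """
    Synthesize `text` with edge-tts using voice `speaker` and save it to a
    uniquely named file under `temp_dir`; returns the saved file path.
    (Helper utility; not called from the Streamlit UI below.)
    """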
    communicate = edge_tts.Communicate(text, speaker)

    # Note: edge-tts emits MP3-encoded audio by default, so the payload is MP3
    # data even though the file is named with a .wav suffix.
    audio_file = temp_dir / f"{uuid.uuid4()}.wav"
    audio_file = audio_file.as_posix()

    await communicate.save(audio_file)
    return audio_file


def generate_spectrogram(filename: str, title: str = "Spectrogram"):
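    """
    Load an audio file, compute its STFT magnitude spectrogram in dB, and
    return the rendered figure as a PNG inside a BytesIO buffer for st.image.
    """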
    signal, sample_rate = librosa.load(filename, sr=None)

    # Magnitude of the short-time Fourier transform.
    mag = np.abs(librosa.stft(signal))

    # Convert the amplitude spectrogram to decibels (fixed reference of 20).
    mag_db = librosa.amplitude_to_db(mag, ref=20)

    plt.figure(figsize=(10, 4))
    librosa.display.specshow(mag_db, sr=sample_rate)
    plt.title(title)

    # Render the figure into an in-memory PNG buffer.
    buf = BytesIO()
    plt.savefig(buf, format="png", bbox_inches="tight")
    plt.close()
    buf.seek(0)
    return buf


def when_click_annotation_button(filename: Path, label: str, tgt_dir: Path):
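    """
    Button callback: move the annotated file into `tgt_dir/<label>/`, creating
    the folder if necessary. On the next rerun the app shows the next file.
    """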
    sub_tgt_dir = tgt_dir / label
    sub_tgt_dir.mkdir(parents=True, exist_ok=True)
    shutil.move(filename.as_posix(), sub_tgt_dir.as_posix())


def main():
    args = get_args()

    src_dir = Path(args.src_dir)
    tgt_dir = Path(args.tgt_dir)

    # Collect the not-yet-annotated audio files under src_dir.
    audio_files: List[Path] = list(src_dir.glob("**/*.wav"))
    if len(audio_files) == 0:
        st.error("No unannotated audio files left.")
        st.stop()

    # Annotate the first remaining file; once a button click moves it away,
    # the next rerun picks up the following one.
    audio_file: Path = audio_files[0]

    if "play_audio" not in st.session_state:
        st.session_state.play_audio = False

    st.title("🔊 Audio File Browser")

    column1, column2 = st.columns([4, 1])
    with column1:
        # st.audio's `format` expects a MIME type such as "audio/wav".
        st.audio(audio_file.as_posix(), format="audio/wav", autoplay=True)

        with st.spinner("Generating spectrogram..."):
            spectrogram = generate_spectrogram(audio_file.as_posix())
            st.image(spectrogram, use_container_width=True)
    with column2:
        shortcut_button(
            label=Labels.noise.value,
            shortcut="1",
            on_click=when_click_annotation_button,
            kwargs={
                "filename": audio_file,
                "label": Labels.noise.value,
                "tgt_dir": tgt_dir,
            },
            type="primary",
        )
        shortcut_button(
            label=Labels.noisy.value,
            shortcut="2",
            on_click=when_click_annotation_button,
            kwargs={
                "filename": audio_file,
                "label": Labels.noisy.value,
                "tgt_dir": tgt_dir,
            },
            type="primary",
        )

    return


if __name__ == "__main__":
    main()