#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import asyncio
import base64
import time
from enum import Enum
from io import BytesIO
import os
from pathlib import Path
import shutil
import sys
from typing import List
import uuid

pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../"))

import edge_tts
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from streamlit_shortcuts import shortcut_button

from project_settings import project_path, temp_dir

# ENTRYPOINT ["streamlit", "run", "streamlit/nx_noise_app.py", "--server.port=8501", "--server.address=0.0.0.0"]
# streamlit run streamlit/nx_speech_app.py --server.port=8501 --server.address=0.0.0.0


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--src_dir",
        default=(project_path / "data/speech/en-PH/2025-01-14/2025-01-14").as_posix(),
        type=str,
    )
    parser.add_argument(
        "--tgt_dir",
        default=(project_path / "data/speech/en-PH/2025-01-14/2025-01-14/finished").as_posix(),
        type=str,
    )
    args = parser.parse_args()
    return args


class Labels(Enum):
    speech = "speech"
    noise = "noise"


async def edge_tts_text_to_speech(text: str, speaker: str = "zh-CN-XiaoxiaoNeural"):
    """Synthesize `text` with edge-tts and return the path of the saved audio file."""
    communicate = edge_tts.Communicate(text, speaker)
    # edge-tts streams MP3 audio by default, so save with a matching extension.
    audio_file = temp_dir / f"{uuid.uuid4()}.mp3"
    audio_file = audio_file.as_posix()
    await communicate.save(audio_file)
    return audio_file


def generate_spectrogram(filename: str, title: str = "Spectrogram"):
    """Render a dB-scaled STFT magnitude spectrogram and return it as a PNG buffer."""
    signal, sample_rate = librosa.load(filename, sr=None)
    mag = np.abs(librosa.stft(signal))
    # mag_db = librosa.amplitude_to_db(mag, ref=np.max)
    mag_db = librosa.amplitude_to_db(mag, ref=20)

    plt.figure(figsize=(10, 4))
    librosa.display.specshow(mag_db, sr=sample_rate)
    plt.title(title)

    buf = BytesIO()
    plt.savefig(buf, format="png", bbox_inches="tight")
    plt.close()
    buf.seek(0)
    return buf


@st.cache_data
def get_shortcut_audio_dict():
    # Pre-synthesize the spoken feedback for each label
    # ("语音" = "speech", "噪音" = "noise").
    result = {
        Labels.speech.value: asyncio.run(edge_tts_text_to_speech("语音")),
        Labels.noise.value: asyncio.run(edge_tts_text_to_speech("噪音")),
    }
    return result


shortcut_audio_dict = get_shortcut_audio_dict()


def main():
    args = get_args()

    src_dir = Path(args.src_dir)
    tgt_dir = Path(args.tgt_dir)

    # Collect unannotated audio files, skipping anything already moved into
    # the target (finished) directory, which may live under src_dir.
    audio_files: List[Path] = [
        filename for filename in src_dir.glob("**/*.wav")
        if tgt_dir not in filename.parents
    ]
    if len(audio_files) == 0:
        st.error("No unannotated audio left.")
        st.stop()
    audio_file: Path = audio_files[0]

    # session_state
    if "play_audio" not in st.session_state:
        st.session_state.play_audio = False

    # ui
    st.title("🔊 Audio File Browser")

    column1, column2 = st.columns([4, 4])

    with column1:
        st.audio(audio_file.as_posix(), format="audio/wav", autoplay=True)
        with st.spinner("Generating spectrogram..."):
            spectrogram = generate_spectrogram(audio_file.as_posix())
        st.image(spectrogram, use_container_width=True)

    # hidden placeholder used to inject an autoplaying <audio> element
    audio_placeholder = st.empty()

    # function
    def shortcut_audio_play(filename: str):
        with open(filename, "rb") as f:
            data = f.read()
        b64 = base64.b64encode(data).decode()
        audio_html = f"""
        <audio autoplay>
            <source src="data:audio/mpeg;base64,{b64}" type="audio/mpeg">
        </audio>
        """
        audio_placeholder.markdown(audio_html, unsafe_allow_html=True)
        return

    def when_click_annotation_button(filename: Path, label: str, tgt_dir: Path):
        # Play the spoken label as audible feedback, then move the file
        # into the per-label sub-directory of the target directory.
        shortcut_audio = shortcut_audio_dict[label]
        shortcut_audio_play(shortcut_audio)
        time.sleep(1)

        sub_tgt_dir = tgt_dir / label
        sub_tgt_dir.mkdir(parents=True, exist_ok=True)
        shutil.move(filename.as_posix(), sub_tgt_dir.as_posix())

    # control
    with column2:
        shortcut_button(
            label=Labels.speech.value,
            shortcut="1",
            on_click=when_click_annotation_button,
            kwargs={
                "filename": audio_file,
                "label": Labels.speech.value,
                "tgt_dir": tgt_dir,
            },
            type="primary",
        )
        shortcut_button(
            label=Labels.noise.value,
            shortcut="2",
            on_click=when_click_annotation_button,
            kwargs={
                "filename": audio_file,
                "label": Labels.noise.value,
                "tgt_dir": tgt_dir,
            },
            type="primary",
        )
    return


if __name__ == "__main__":
    main()