# NeMo / text_tokenizer.py
# Build a tokenizer vocabulary using multiple tar datasets simultaneously.
# (paste residue from a git view preserved as comments: author dlxj, commit 8b090a7)
import os
import sys
import json
# Add scripts/tokenizers to sys.path so process_asr_text_tokenizer could be imported.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "scripts", "tokenizers")))
# process_asr_text_tokenizer uses double-underscore (name-mangled) helpers internally
# and defines its argparse parser at import time; to avoid conflicts we either call
# its internal functions directly or invoke the script via os.system/subprocess.
# Since VOCAB_SIZE is computed dynamically, we first read the manifests and collect
# every character to derive it.
def calculate_vocab_size(manifest_paths):
    """Compute a tokenizer vocabulary size from NeMo-style JSON-lines manifests.

    Each manifest line is a JSON object; the unique characters of every
    entry's ``text`` field are collected across all manifests.

    Args:
        manifest_paths: iterable of paths to JSON-lines manifest files.

    Returns:
        int: number of distinct characters seen, plus 20 slots reserved for
        special tokens (<s>, </s>, <pad>, <unk>, ...) and unseen characters.
    """
    unique_chars = set()
    for manifest_path in manifest_paths:
        with open(manifest_path, 'r', encoding='utf-8') as f:
            for line in f:
                # Skip blank lines (e.g. a trailing newline at EOF) instead
                # of crashing json.loads on an empty string.
                if not line.strip():
                    continue
                item = json.loads(line)
                # set.update adds each character of the string in one C-level pass.
                unique_chars.update(item.get('text', ''))
    # Vocab size = distinct characters + reserved headroom. 10-20 slots are
    # typically reserved for special tokens; we use 20. Rounding to a power
    # of two is sometimes recommended, but the raw count is returned here.
    return len(unique_chars) + 20
def main():
    """Derive a vocab size from the manifests, then run NeMo's tokenizer script.

    process_asr_text_tokenizer.py parses argv at import time, so it is
    invoked as a subprocess (inside the "NeMo" conda environment) rather
    than imported, with this directory prepended to PYTHONPATH so the child
    process can locate the local nemo package.
    """
    manifests = [
        "data/common_voice_11_0/ja/train/train_common_voice_11_0_manifest.json",
        "data/common_voice_11_0/ja/test_tarred_1bk/tarred_audio_manifest.json",
        "data/common_voice_11_0/ja/validation/validation_common_voice_11_0_manifest.json",
    ]
    tokenizer_root = "data/common_voice_11_0/ja/tokenizers"

    print("Calculating dynamic VOCAB_SIZE...")
    vocab_size = calculate_vocab_size(manifests)
    print(f"Calculated VOCAB_SIZE: {vocab_size}")

    import subprocess
    child_env = os.environ.copy()
    script_dir = os.path.abspath(os.path.dirname(__file__))
    child_env["PYTHONPATH"] = script_dir + os.pathsep + child_env.get("PYTHONPATH", "")

    # Full invocation: conda activates the NeMo environment, python runs the
    # tokenizer script with comma-joined manifests and the computed vocab size.
    command = [
        "conda", "run", "-n", "NeMo",
        "python",
        "scripts/tokenizers/process_asr_text_tokenizer.py",
        f"--manifest={','.join(manifests)}",
        f"--vocab_size={vocab_size}",
        f"--data_root={tokenizer_root}",
        "--tokenizer=spe",
        "--spe_type=bpe",
    ]
    print(f"Running command: \n{' '.join(command)}")

    exit_code = subprocess.run(command, env=child_env).returncode
    if exit_code != 0:
        print(f"Error executing tokenizer script, exit code: {exit_code}")
        sys.exit(1)
    print("Tokenizer processing completed successfully.")
if __name__ == "__main__":
main()