import json
import os
import subprocess
import sys

# Make the bundled tokenizer scripts importable as top-level modules.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "scripts", "tokenizers")))
|
|
| |
| |
| |
|
|
def calculate_vocab_size(manifest_paths, extra_tokens=20):
    """Return a vocabulary size derived from the unique characters in manifests.

    Each manifest is a JSON-lines file (one JSON object per line, NeMo style);
    the ``"text"`` field of every entry is scanned and the number of distinct
    characters across all manifests is counted.

    Args:
        manifest_paths: Iterable of paths to JSONL manifest files.
        extra_tokens: Headroom added on top of the unique-character count for
            special tokens (default 20, preserving the original margin).

    Returns:
        int: number of unique characters plus ``extra_tokens``.
    """
    unique_chars = set()
    for manifest_path in manifest_paths:
        with open(manifest_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    # Tolerate blank/trailing lines; json.loads("") would raise.
                    continue
                item = json.loads(line)
                # set.update() folds every character of the utterance into the set.
                unique_chars.update(item.get('text', ''))
    return len(unique_chars) + extra_tokens
|
|
def main():
    """Compute a dynamic VOCAB_SIZE and run the NeMo SPE tokenizer builder.

    Scans the Common Voice (ja) train/test/validation manifests, derives the
    vocabulary size from the set of unique characters, then shells out to
    ``scripts/tokenizers/process_asr_text_tokenizer.py`` inside the ``NeMo``
    conda environment. Exits with status 1 if the tokenizer script fails.
    """
    manifest_paths = [
        "data/common_voice_11_0/ja/train/train_common_voice_11_0_manifest.json",
        "data/common_voice_11_0/ja/test_tarred_1bk/tarred_audio_manifest.json",
        "data/common_voice_11_0/ja/validation/validation_common_voice_11_0_manifest.json",
    ]
    data_root = "data/common_voice_11_0/ja/tokenizers"

    print("Calculating dynamic VOCAB_SIZE...")
    vocab_size = calculate_vocab_size(manifest_paths)
    print(f"Calculated VOCAB_SIZE: {vocab_size}")

    # Let the spawned script import modules from this project's root directory.
    env = os.environ.copy()
    env["PYTHONPATH"] = os.path.abspath(os.path.dirname(__file__)) + os.pathsep + env.get("PYTHONPATH", "")

    # Run inside the "NeMo" conda environment; the list form (shell=False)
    # avoids any shell-quoting/injection issues with the arguments.
    command = [
        "conda", "run", "-n", "NeMo",
        "python",
        "scripts/tokenizers/process_asr_text_tokenizer.py",
        f"--manifest={','.join(manifest_paths)}",
        f"--vocab_size={vocab_size}",
        f"--data_root={data_root}",
        "--tokenizer=spe",
        "--spe_type=bpe",
    ]

    print(f"Running command: \n{' '.join(command)}")
    result = subprocess.run(command, env=env)
    if result.returncode != 0:
        print(f"Error executing tokenizer script, exit code: {result.returncode}")
        sys.exit(1)
    print("Tokenizer processing completed successfully.")
|
|
| if __name__ == "__main__": |
| main() |
|
|