|
|
|
|
|
import os |
|
|
import shutil |
|
|
import re |
|
|
from pypinyin import pinyin, Style |
|
|
from textgrid import TextGrid, IntervalTier, Interval |
|
|
import codecs |
|
|
from collections import defaultdict |
|
|
|
|
|
|
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
# --- Configuration --------------------------------------------------------
# Root of the raw M4Singer dataset (one "Singer#Song" folder per song).
SOURCE_DATASET_DIR = r"D:\DiffSingerDatasets\m4singer"

# Destination root; per-singer "wav" and "TextGrid" subfolders are created here.
OUTPUT_DATASET_DIR = r"D:\DiffSingerDatasets\m4singer_processed"

# opencpop-extension dictionary: tab-separated "pinyin<TAB>phoneme phoneme ..." lines.
OPENCPOP_DICT_PATH = r"D:\DiffSingerDatasets\SOFA\dictionary\opencpop-extension.txt"

# Create OUTPUT_DATASET_DIR automatically when it does not exist.
CREATE_OUTPUT_DIR_IF_NOT_EXISTS = True

# Maximum boundary mismatch (seconds) tolerated when aligning a character
# interval against phone intervals.
TIME_TOLERANCE = 0.015

# When a single original phone interval must be split into initial + final,
# the initial phoneme receives this fraction of the total duration.
INITIAL_PHONE_SPLIT_FRACTION = 0.2
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Maps a final as written in the M4Singer TextGrids (key) to the list of
# dictionary phonemes it may correspond to (values).  parse_opencpop_dict()
# inverts this into DICT_TO_TEXTGRID_SIMPLIFY_MAP for fuzzy matching.
TEXTGRID_TO_DICT_SIMPLIFY_MAP = {
    # 'i' can stand for the apical variants 'ir'/'i0' after retroflex/dental initials.
    'i': ['i', 'ir', 'i0'],
    # Full finals whose TextGrid spelling may be reduced/abbreviated.
    'iou': ['iu', 'ou', 'o'],
    'uei': ['ui', 'ei', 'uai'],
    'uen': ['un', 'en', 'vn'],
    # u-umlaut ('v') family; 'u' after j/q/x/y is actually 'v'.
    'v': ['v'],
    'u': ['u','v'],
    've': ['ve'],
    'van': ['van','uan'],
    'vn': ['vn', 'un'],
    # 'E'/'En' are dictionary spellings for the ie/ian vowel quality.
    'ie': ['ie', 'E'],
    'ian': ['ian', 'En'],
    # u-glide finals whose glide may be dropped in the TextGrid.
    'ua': ['a', 'ua'],
    'uai': ['ai', 'uai'],
    'uan': ['an', 'uan', 'van'],
    'uang': ['ang', 'uang'],
    'uo': ['o', 'uo'],
    # i-glide finals whose glide may be dropped in the TextGrid.
    'ia': ['a', 'ia'],
    'iao': ['ao', 'iao'],
    'iang': ['ang', 'iang'],
    'iong': ['ong', 'iong'],
    # 'ueng' occasionally annotated as plain 'ong'.
    'ueng': ['ueng', 'ong'],
    # Identity (or near-identity) entries for the remaining finals.
    'a': ['a'], 'o': ['o'], 'e': ['e', 'E'], 'ai': ['ai'], 'ei': ['ei'],
    'ao': ['ao'], 'ou': ['ou'], 'an': ['an'], 'en': ['en'], 'ang': ['ang'],
    'eng': ['eng'], 'ong': ['ong'], 'in': ['in'], 'un': ['un'], 'ui': ['ui'],
    'iu': ['iu'], 'ir': ['ir'], 'i0': ['i0'],
}
|
|
|
|
|
# Reverse lookup (dictionary phoneme -> possible TextGrid spellings); cleared
# and repopulated by parse_opencpop_dict() from TEXTGRID_TO_DICT_SIMPLIFY_MAP,
# plus identity entries for any dictionary phoneme not covered by that table.
DICT_TO_TEXTGRID_SIMPLIFY_MAP = defaultdict(list)
|
|
|
|
|
|
|
|
|
|
|
def _read_dict_lines(dict_path):
    """Read the dictionary file as UTF-8, falling back to GBK.

    Returns the list of raw lines on success, or None (after printing an
    error message) if the file is missing or unreadable in both encodings.
    """
    try:
        # codecs.open is legacy/deprecated; builtin open with an explicit
        # encoding is the modern equivalent.
        with open(dict_path, 'r', encoding='utf-8') as f:
            return f.readlines()
    except UnicodeDecodeError:
        # Some distributions ship the dictionary in a legacy Chinese codepage.
        try:
            with open(dict_path, 'r', encoding='gbk') as f:
                raw_lines = f.readlines()
            print("Info: Dictionary file read as GBK (UTF-8 failed).")
            return raw_lines
        except Exception as e_inner:
            print(f"Error reading dictionary {dict_path}: {e_inner}")
            return None
    except FileNotFoundError:
        print(f"Error: Dictionary file not found at {dict_path}")
        return None
    except Exception as e:
        print(f"Error reading dictionary {dict_path}: {e}")
        return None


def parse_opencpop_dict(dict_path):
    """Parse the opencpop-extension dictionary and build the reverse map.

    Each dictionary line is "pinyin<TAB>phoneme [phoneme ...]".  Silence
    pseudo-syllables 'AP'/'SP' are guaranteed to be present in the result.
    As a side effect the module-level DICT_TO_TEXTGRID_SIMPLIFY_MAP is
    cleared and repopulated.

    Args:
        dict_path: path to the dictionary text file.

    Returns:
        (pinyin_to_phonemes, all_dict_phonemes) on success, where the first
        is a dict mapping pinyin -> list of phonemes and the second is the
        set of every phoneme seen; (None, None) if the file could not be read.
    """
    raw_lines = _read_dict_lines(dict_path)
    if raw_lines is None:
        return None, None

    pinyin_to_phonemes = {}
    all_dict_phonemes = set()
    for line in raw_lines:
        line = line.strip()
        # Skip blanks and malformed lines without a tab separator.
        if not line or '\t' not in line:
            continue
        key_part, phonemes_part = line.split('\t', 1)
        pinyin_key = key_part.strip()
        phonemes = [ph for ph in phonemes_part.strip().split(' ') if ph]
        if pinyin_key and phonemes:
            pinyin_to_phonemes[pinyin_key] = phonemes
            all_dict_phonemes.update(phonemes)

    # Silence/breath pseudo-syllables must always be available.
    for sil in ('AP', 'SP'):
        if sil not in pinyin_to_phonemes:
            pinyin_to_phonemes[sil] = [sil]
            all_dict_phonemes.add(sil)

    # Rebuild the reverse map: dictionary phoneme -> possible TextGrid forms.
    DICT_TO_TEXTGRID_SIMPLIFY_MAP.clear()
    mapped_dict_phonemes = set()
    for tg_form, dict_forms in TEXTGRID_TO_DICT_SIMPLIFY_MAP.items():
        for dict_form in dict_forms:
            DICT_TO_TEXTGRID_SIMPLIFY_MAP[dict_form].append(tg_form)
            mapped_dict_phonemes.add(dict_form)
    # Any dictionary phoneme not covered by the table maps to itself.
    for ph in all_dict_phonemes:
        if ph not in mapped_dict_phonemes:
            DICT_TO_TEXTGRID_SIMPLIFY_MAP[ph].append(ph)

    return pinyin_to_phonemes, all_dict_phonemes
|
|
|
|
|
def get_song_pinyin_name(name):
    """Converts a song name to a safe pinyin filename."""
    try:
        has_cjk = any('\u4e00' <= ch <= '\u9fff' for ch in name)
        if not has_cjk:
            # Pure ASCII/latin title: lowercase, underscore-join words,
            # drop anything that is not [a-z0-9_], collapse repeats.
            cleaned = re.sub(r'\s+', '_', name.lower().strip())
            cleaned = re.sub(r'[^a-z0-9_]+', '', cleaned)
            cleaned = re.sub(r'_+', '_', cleaned).strip('_')
            return cleaned if cleaned else "unknown_song"
        # Chinese title: transliterate each character and join with '_'.
        syllables = pinyin(name, style=Style.NORMAL, errors='ignore')
        if not syllables:
            raise ValueError("pypinyin returned empty list or failed")
        joined = "_".join(item[0] for item in syllables if item and item[0])
        cleaned = re.sub(r'[^\w_]+', '', joined).lower()
        cleaned = re.sub(r'_+', '_', cleaned).strip('_')
        return cleaned if cleaned else "unknown_song"
    except Exception as e:
        # Last-resort cleanup when transliteration fails.
        print(f"Info: Could not convert '{name}' to pinyin ({e}). Using fallback cleaning.")
        fallback = re.sub(r'[^\w]+', '_', name).strip('_').lower()
        return fallback if fallback else "unknown_song"
|
|
|
|
|
def clean_special_symbols(text):
    """Removes < > brackets."""
    # Strip exactly one pair of enclosing angle brackets (e.g. "<AP>" -> "AP");
    # non-strings and unbracketed strings pass through untouched.
    is_bracketed = (
        isinstance(text, str)
        and text.startswith('<')
        and text.endswith('>')
    )
    return text[1:-1] if is_bracketed else text
|
|
|
|
|
def get_phoneme_intervals_for_char(char_interval, phone_tier, tolerance):
    """Finds the sequence of original phone INTERVALS corresponding to the char_interval.

    Matching is by time-boundary proximity: the phone whose minTime best
    matches the character's minTime starts the sequence, and the phone whose
    maxTime best matches the character's maxTime ends it.  Returns a list of
    intervals, or None (after printing a warning) when no alignment is found.
    """
    char_mark_cleaned = clean_special_symbols(char_interval.mark)
    # Silence/breath: find the single phone interval with the same mark whose
    # combined start+end deviation is smallest and under tolerance.
    if char_mark_cleaned in ['AP', 'SP']:
        found_interval = None; min_diff = float('inf')
        for phn_interval in phone_tier:
            time_diff = abs(phn_interval.minTime - char_interval.minTime) + abs(phn_interval.maxTime - char_interval.maxTime)
            mark_match = clean_special_symbols(phn_interval.mark) == char_mark_cleaned
            if time_diff < tolerance and mark_match:
                if time_diff < min_diff: found_interval = phn_interval; min_diff = time_diff
        if found_interval: return [found_interval]
        else: print(f"Warning: No phone interval found for {char_mark_cleaned} char interval [{char_interval.minTime:.3f}-{char_interval.maxTime:.3f}]."); return None

    # Regular character: locate the best-matching start phone index.
    start_phone_idx, end_phone_idx = -1, -1
    min_start_diff, min_end_diff = float('inf'), float('inf')
    for i, phn_interval in enumerate(phone_tier):
        diff = abs(phn_interval.minTime - char_interval.minTime)
        if diff < tolerance and diff < min_start_diff: min_start_diff = diff; start_phone_idx = i
    if start_phone_idx != -1:
        # Scan forward from the start phone for the best-matching end boundary.
        # The 5x-tolerance cap stops the scan once we have a candidate and
        # drift too far past the character's end.
        search_start_index = start_phone_idx; max_expected_end_time = char_interval.maxTime + tolerance * 5
        for i in range(search_start_index, len(phone_tier)):
            phn_interval = phone_tier[i]
            if phn_interval.minTime > char_interval.maxTime + tolerance: break
            if phn_interval.maxTime > max_expected_end_time and end_phone_idx != -1: break
            diff = abs(phn_interval.maxTime - char_interval.maxTime)
            if diff < tolerance:
                if diff < min_end_diff: min_end_diff = diff; end_phone_idx = i
            elif end_phone_idx == -1 and diff < tolerance * 2:
                # Relaxed (2x tolerance) fallback, only while no candidate yet.
                if diff < min_end_diff: min_end_diff = diff; end_phone_idx = i
    if start_phone_idx != -1 and end_phone_idx != -1 and end_phone_idx >= start_phone_idx:
        intervals = [phone_tier[i] for i in range(start_phone_idx, end_phone_idx + 1)]
        # Keep only voiced phones; silence/empty-only spans are treated as failure.
        non_sil_intervals = [iv for iv in intervals if clean_special_symbols(iv.mark) not in ['AP', 'SP', ''] and iv.mark and iv.mark.strip()]
        if non_sil_intervals: return non_sil_intervals
        elif intervals: print(f"Warning: Found time range for char '{char_interval.mark}' but only silence/empty phones within. [{char_interval.minTime:.3f}-{char_interval.maxTime:.3f}]"); return None
        else: return None
    else: print(f"Warning: Could not determine phone interval sequence for char '{char_interval.mark}' interval [{char_interval.minTime:.3f}-{char_interval.maxTime:.3f}]. (Start diff: {min_start_diff:.4f}, End diff: {min_end_diff:.4f})."); return None
|
|
|
|
|
|
|
|
def fuzzy_match_sequences(found_phones, expected_phones):
    """More robust fuzzy matching based on specific patterns.

    Compares the phoneme sequence found in the TextGrid against the sequence
    the dictionary expects for a candidate pinyin.  Returns True when they
    are exactly equal, equal once concatenated, or differ only by known
    y/w-glide drops or per-position TextGrid spelling variants.
    """
    if not found_phones or not expected_phones: return False
    if found_phones == expected_phones: return True

    # Concatenated comparison tolerates different segmentations of the
    # same letters (e.g. ['u','an'] vs ['uan']).
    f_str = "".join(found_phones)
    e_str = "".join(expected_phones)
    if f_str == e_str: return True

    # Dictionary has y/w initial + final but the TextGrid kept only a final.
    if len(expected_phones) == 2 and expected_phones[0] in ['y', 'w'] and len(found_phones) == 1:
        expected_final = expected_phones[1]; found_final_tg = found_phones[0]
        if found_final_tg in DICT_TO_TEXTGRID_SIMPLIFY_MAP.get(expected_final, []): return True

    # Inverse case: TextGrid has a y/w initial the dictionary collapses away.
    if len(expected_phones) == 1 and len(found_phones) == 2 and found_phones[0] in ['y', 'w']:
        expected_final = expected_phones[0]; found_final_tg = found_phones[1]
        if found_final_tg in DICT_TO_TEXTGRID_SIMPLIFY_MAP.get(expected_final, []): return True

    # Same length: allow each position to be an accepted TextGrid variant,
    # plus a few initial-dependent u/v-style special cases.
    if len(found_phones) == len(expected_phones):
        match = True; initial = expected_phones[0] if len(expected_phones) > 0 else None
        for i in range(len(found_phones)):
            f_ph, e_ph = found_phones[i], expected_phones[i]
            possible_tg_forms = DICT_TO_TEXTGRID_SIMPLIFY_MAP.get(e_ph, [e_ph])
            if f_ph == e_ph or f_ph in possible_tg_forms: continue
            # NOTE(review): `initial in 'jqxy'` is a substring test, so a
            # single-letter initial like 'j' matches but '' (empty) would
            # also pass — presumably unreachable here; confirm.
            if e_ph == 'v' and f_ph == 'u' and initial in 'jqxy': continue
            if e_ph == 'van' and f_ph == 'uan' and initial in 'jqxy': continue
            if e_ph == 'vn' and f_ph == 'un' and initial in 'jqxy': continue
            if e_ph == 'uai' and f_ph == 'uei' and initial == 'h': continue
            if e_ph == 'ui' and f_ph == 'uei' and initial != 'h': continue
            if e_ph == 'iang' and f_ph == 'ang' and initial == 'x': continue
            match = False; break
        if match: return True

    return False
|
|
|
|
|
def get_correct_pinyin_and_map(char_mark, original_phone_intervals, pinyin_phoneme_map):
    """Determines best pinyin, handling '嗯' and direct joined check.

    Tries, in order: the joined TextGrid phonemes as a pinyin key, an exact
    phoneme-sequence match among the character's heteronym pinyins, a fuzzy
    match, and finally a default to the first heteronym.

    Returns (chosen_pinyin, target_phonemes) where target_phonemes may be
    None when the chosen pinyin is absent from the dictionary.
    """
    cleaned_char = clean_special_symbols(char_mark)
    # Empty/whitespace marks carry no phonemes; silences map to themselves.
    if not cleaned_char or cleaned_char.isspace(): return cleaned_char, None
    if cleaned_char in ['AP', 'SP']: return cleaned_char, [cleaned_char]

    # All candidate readings for this character (heteronym=True).
    try: possible_pinyins_raw = pinyin(cleaned_char, style=Style.NORMAL, heteronym=True)[0]
    except Exception as e: print(f"Error getting pinyin for '{cleaned_char}': {e}."); return "?", None

    # '嗯' is transliterated as syllabic 'n'/'ng', which the dictionary does
    # not contain; normalize to 'en'/'eng'.
    possible_pinyins = []
    if cleaned_char == '嗯':
        for p in possible_pinyins_raw:
            if p == 'n': possible_pinyins.append('en')
            elif p == 'ng': possible_pinyins.append('eng')
            else: possible_pinyins.append(p)
        if not possible_pinyins: possible_pinyins = ['en']
    else: possible_pinyins = possible_pinyins_raw

    # Phone alignment failed earlier: fall back to the first reading.
    if original_phone_intervals is None:
        chosen_pinyin = possible_pinyins[0]
        print(f"Warning: Defaulting pinyin '{cleaned_char}'->'{chosen_pinyin}' due to phone mapping error.")
        target_phones = pinyin_phoneme_map.get(chosen_pinyin)
        if target_phones is None: print(f"Error: Default pinyin '{chosen_pinyin}' for char '{cleaned_char}' not in dictionary!")
        return chosen_pinyin, target_phones

    found_phones_clean = [clean_special_symbols(iv.mark) for iv in original_phone_intervals]
    found_phones_str = " ".join(found_phones_clean)
    joined_found_phones = "".join(found_phones_clean)

    best_match_pinyin = None; match_type = "No Match"

    # Strategy 1: the concatenated phones already spell a dictionary pinyin.
    if joined_found_phones in pinyin_phoneme_map:
        best_match_pinyin = joined_found_phones; match_type = "Direct Joined"

    # Strategy 2: a heteronym whose dictionary phonemes match exactly.
    if match_type == "No Match":
        for pyn in possible_pinyins:
            if pyn in pinyin_phoneme_map:
                if " ".join(pinyin_phoneme_map[pyn]) == found_phones_str:
                    best_match_pinyin = pyn; match_type = "Exact"; break

    # Strategy 3: fuzzy match; only a unique fuzzy hit is trusted.
    if match_type == "No Match":
        fuzzy_matches = []
        for pyn in possible_pinyins:
            if pyn in pinyin_phoneme_map:
                if fuzzy_match_sequences(found_phones_clean, pinyin_phoneme_map[pyn]): fuzzy_matches.append(pyn)
        if len(fuzzy_matches) == 1: best_match_pinyin = fuzzy_matches[0]; match_type = "Fuzzy"
        elif len(fuzzy_matches) > 1: print(f"Warning: Ambiguous fuzzy match for '{cleaned_char}' ('{found_phones_str}'). Matches: {fuzzy_matches}. Defaulting."); match_type = "Default"; best_match_pinyin = possible_pinyins[0]
        else: match_type = "Default"; best_match_pinyin = possible_pinyins[0]

    chosen_pinyin = best_match_pinyin if best_match_pinyin else possible_pinyins[0]
    # Re-apply the 'n'/'ng' normalization in case a raw reading slipped through.
    if cleaned_char == '嗯':
        if chosen_pinyin == 'n' and 'en' in pinyin_phoneme_map: chosen_pinyin = 'en'
        elif chosen_pinyin == 'ng' and 'eng' in pinyin_phoneme_map: chosen_pinyin = 'eng'

    # Defaulted matches are logged with full expected-vs-found details.
    if match_type == "Default":
        expected_details = {p: pinyin_phoneme_map.get(p, ["Not in Dict"]) for p in possible_pinyins}
        warning_msg = f"Warning: No reliable match for phonemes '{found_phones_str}' (derived for char '{cleaned_char}'). Possible pinyins: {possible_pinyins}. "
        warning_msg += ", ".join([f"Expected for '{p}': {det}" for p, det in expected_details.items()]) + "."
        warning_msg += f" Defaulting to pinyin: {chosen_pinyin}"
        print(warning_msg)

    target_phonemes = pinyin_phoneme_map.get(chosen_pinyin)
    if target_phonemes is None:
        print(f"Error: Chosen pinyin '{chosen_pinyin}' for char '{cleaned_char}' not in dictionary!")
        # Final rescue for '嗯' when the chosen reading is missing.
        if cleaned_char == '嗯':
            if 'en' in pinyin_phoneme_map: chosen_pinyin, target_phonemes = 'en', pinyin_phoneme_map['en']
            elif 'eng' in pinyin_phoneme_map: chosen_pinyin, target_phonemes = 'eng', pinyin_phoneme_map['eng']

    return chosen_pinyin, target_phonemes
|
|
|
|
|
|
|
|
def process_textgrid_file(tg_path, pinyin_phoneme_map, tolerance, split_fraction):
    """Processes a single TextGrid file with duration splitting.

    Rebuilds the word tier with pinyin labels and the phone tier with
    dictionary phonemes, splitting interval durations when the dictionary
    expects more phonemes than the TextGrid provides.  Returns a new
    TextGrid with 'words' and 'phones' tiers, or None on failure.
    """
    try: tg = TextGrid.fromFile(tg_path)
    except Exception as e: print(f"Error reading TG {tg_path}: {e}"); return None
    if len(tg.tiers) < 2: print(f"Warning: TG {tg_path} has < 2 tiers."); return None

    # Identify word and phone tiers by name; fall back to tier order (0, 1).
    original_word_tier, original_phone_tier = None, None
    tier_names = [t.name.lower() if t.name else "" for t in tg.tiers]
    try:
        word_indices = [i for i, name in enumerate(tier_names) if name and ("word" in name or "hanzi" in name or "char" in name)]
        phone_indices = [i for i, name in enumerate(tier_names) if name and ("phone" in name or "syllable" in name or "phoneme" in name or "pinyin" in name)]
        if word_indices: original_word_tier = tg.tiers[word_indices[0]]
        if phone_indices: original_phone_tier = tg.tiers[phone_indices[0]]
    except Exception: pass
    if original_word_tier is None: original_word_tier = tg.tiers[0]
    if original_phone_tier is None:
        if len(tg.tiers) > 1: original_phone_tier = tg.tiers[1]
        else: print(f"Error: Cannot identify phone tier in {tg_path}."); return None

    new_word_tier = IntervalTier(name="words", minTime=tg.minTime, maxTime=tg.maxTime)
    new_phone_tier = IntervalTier(name="phones", minTime=tg.minTime, maxTime=tg.maxTime)
    # Running end times of what has been written so far, used to fill gaps.
    last_word_max_time = tg.minTime; last_phone_max_time = tg.minTime

    for word_interval in original_word_tier:
        # Fill any gap before this word with empty intervals on both tiers.
        if word_interval.minTime > last_word_max_time + tolerance: new_word_tier.addInterval(Interval(last_word_max_time, word_interval.minTime, ""))
        if word_interval.minTime > last_phone_max_time + tolerance: new_phone_tier.addInterval(Interval(last_phone_max_time, word_interval.minTime, ""))
        # Unlabeled intervals are copied as empty; degenerate ones skipped.
        if not word_interval.mark and word_interval.maxTime > word_interval.minTime:
            new_word_tier.addInterval(Interval(word_interval.minTime, word_interval.maxTime, ""))
            new_phone_tier.addInterval(Interval(word_interval.minTime, word_interval.maxTime, ""))
            last_word_max_time = max(last_word_max_time, word_interval.maxTime); last_phone_max_time = max(last_phone_max_time, word_interval.maxTime)
            continue
        elif not word_interval.mark or word_interval.maxTime <= word_interval.minTime: continue

        # Align phones to this character and pick its pinyin + phoneme target.
        original_phone_intervals = get_phoneme_intervals_for_char(word_interval, original_phone_tier, tolerance)
        chosen_pinyin, target_phonemes = get_correct_pinyin_and_map(word_interval.mark, original_phone_intervals, pinyin_phoneme_map)

        new_word_tier.addInterval(Interval(word_interval.minTime, word_interval.maxTime, chosen_pinyin))
        last_word_max_time = max(last_word_max_time, word_interval.maxTime)

        if target_phonemes and original_phone_intervals:
            n_target, n_orig = len(target_phonemes), len(original_phone_intervals)
            if n_target == n_orig:
                # 1:1 — relabel the original intervals in place.
                for i in range(n_target):
                    orig_iv = original_phone_intervals[i]
                    if orig_iv.maxTime > orig_iv.minTime: new_phone_tier.addInterval(Interval(orig_iv.minTime, orig_iv.maxTime, target_phonemes[i])); last_phone_max_time = max(last_phone_max_time, orig_iv.maxTime)
            elif n_target == 1:
                # Many-to-one — merge the original span into a single phoneme.
                total_min_time = original_phone_intervals[0].minTime; total_max_time = original_phone_intervals[-1].maxTime
                if total_max_time > total_min_time: new_phone_tier.addInterval(Interval(total_min_time, total_max_time, target_phonemes[0])); last_phone_max_time = max(last_phone_max_time, total_max_time)

            elif n_target > n_orig and n_orig >= 1:
                # One-to-many — split the original span across the targets.
                orig_min = original_phone_intervals[0].minTime; orig_max = original_phone_intervals[-1].maxTime; orig_dur = orig_max - orig_min
                if orig_dur > 1e-6:
                    # With a single source interval, give the first (initial)
                    # phoneme only split_fraction of the duration.
                    use_initial_split = (n_orig == 1)
                    short_dur = 0
                    if use_initial_split: short_dur = orig_dur * split_fraction

                    split_dur = orig_dur / n_target
                    phone_start_time = orig_min

                    for i in range(n_target - 1):
                        current_split_dur = short_dur if use_initial_split and i==0 else split_dur
                        phone_end_time = min(phone_start_time + current_split_dur , orig_max)
                        if phone_end_time > phone_start_time: new_phone_tier.addInterval(Interval(phone_start_time, phone_end_time, target_phonemes[i]))
                        phone_start_time = phone_end_time

                    # Last phoneme absorbs the remainder up to orig_max.
                    if orig_max > phone_start_time: new_phone_tier.addInterval(Interval(phone_start_time, orig_max, target_phonemes[n_target - 1]))
                    last_phone_max_time = max(last_phone_max_time, orig_max)
                else: print(f"Warning: Orig phone interval for '{word_interval.mark}' zero duration. Cannot split."); new_phone_tier.addInterval(Interval(word_interval.minTime, word_interval.maxTime, "")); last_phone_max_time = max(last_phone_max_time, word_interval.maxTime)

            else:
                # Fewer targets than originals — keep the original labels.
                print(f"Warning: Phoneme length mismatch for '{word_interval.mark}' ({chosen_pinyin}). T:{n_target}{target_phonemes}, O:{n_orig}. Copying original.")
                temp_last_phone_time = word_interval.minTime
                for iv in original_phone_intervals:
                    cleaned_mark = clean_special_symbols(iv.mark)
                    if cleaned_mark and cleaned_mark not in ['AP', 'SP'] and iv.maxTime > iv.minTime: new_phone_tier.addInterval(Interval(iv.minTime, iv.maxTime, cleaned_mark)); temp_last_phone_time = max(temp_last_phone_time, iv.maxTime)
                last_phone_max_time = max(last_phone_max_time, temp_last_phone_time)
        elif chosen_pinyin in ['AP','SP']:
            # Silence words mirror their label on the phone tier.
            if word_interval.maxTime > word_interval.minTime: new_phone_tier.addInterval(Interval(word_interval.minTime, word_interval.maxTime, chosen_pinyin)); last_phone_max_time = max(last_phone_max_time, word_interval.maxTime)
        else:
            # No usable phonemes at all: emit an empty phone interval.
            if word_interval.maxTime > word_interval.minTime: new_phone_tier.addInterval(Interval(word_interval.minTime, word_interval.maxTime, "")); last_phone_max_time = max(last_phone_max_time, word_interval.maxTime)
        # NOTE(review): keeps the phone cursor at least at the word cursor;
        # placed at loop-body level per the original control flow — confirm.
        last_phone_max_time = max(last_phone_max_time, last_word_max_time)

    # Pad both tiers out to the TextGrid's end.
    if tg.maxTime > last_word_max_time + tolerance: new_word_tier.addInterval(Interval(last_word_max_time, tg.maxTime, ""))
    if tg.maxTime > last_phone_max_time + tolerance: new_phone_tier.addInterval(Interval(last_phone_max_time, tg.maxTime, ""))

    processed_tg = TextGrid(name=os.path.basename(tg_path), minTime=tg.minTime, maxTime=tg.maxTime)
    processed_tg.append(new_word_tier); processed_tg.append(new_phone_tier)
    return processed_tg
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Driver: load the dictionary, then walk every "Singer#Song" directory,
    # converting each numbered wav/TextGrid pair into the output layout
    # <out>/<singer>/wav/<id>_<song_pinyin>.wav (+ matching TextGrid).
    print("Starting M4Singer dataset processing...")
    print(f"Source Directory: {SOURCE_DATASET_DIR}")
    print(f"Output Directory: {OUTPUT_DATASET_DIR}")
    print(f"Dictionary File: {OPENCPOP_DICT_PATH}")

    pinyin_phoneme_map, _ = parse_opencpop_dict(OPENCPOP_DICT_PATH)
    if pinyin_phoneme_map is None:
        print("Error: Failed to load dictionary.")
        exit(1)
    print(f"Loaded Pinyin->Phoneme dictionary with {len(pinyin_phoneme_map)} entries.")

    if not os.path.isdir(SOURCE_DATASET_DIR):
        print(f"Error: Source dir '{SOURCE_DATASET_DIR}' not found.")
        exit(1)
    if not os.path.isdir(OUTPUT_DATASET_DIR):
        if CREATE_OUTPUT_DIR_IF_NOT_EXISTS:
            print(f"Output dir '{OUTPUT_DATASET_DIR}' not found, creating.")
            try:
                os.makedirs(OUTPUT_DATASET_DIR)
            except OSError as e:
                print(f"Error creating output dir {OUTPUT_DATASET_DIR}: {e}")
                exit(1)
        else:
            print(f"Error: Output dir '{OUTPUT_DATASET_DIR}' not found.")
            exit(1)

    processed_files_count = 0; processed_songs = 0; skipped_dirs = 0; total_dirs = 0
    singer_song_dirs = []
    try:
        # Singer/song folders are named "<singer_id>#<song_name>".
        all_entries = os.listdir(SOURCE_DATASET_DIR)
        singer_song_dirs = [d for d in all_entries if os.path.isdir(os.path.join(SOURCE_DATASET_DIR, d)) and '#' in d]
        total_dirs = len(singer_song_dirs)
    except OSError as e:
        print(f"Error accessing source {SOURCE_DATASET_DIR}: {e}")
        exit(1)
    print(f"Found {total_dirs} potential singer/song directories.")

    for singer_song_dir in tqdm(singer_song_dirs, desc="Processing Singers/Songs", unit="song"):
        parts = singer_song_dir.split('#', 1)
        if len(parts) != 2:
            skipped_dirs += 1
            continue
        singer_id, song_name_original = parts[0], parts[1]
        song_name_pinyin = get_song_pinyin_name(song_name_original)
        source_song_path = os.path.join(SOURCE_DATASET_DIR, singer_song_dir)
        output_singer_dir = os.path.join(OUTPUT_DATASET_DIR, singer_id)
        output_wav_dir = os.path.join(output_singer_dir, "wav")
        output_tg_dir = os.path.join(output_singer_dir, "TextGrid")
        try:
            os.makedirs(output_wav_dir, exist_ok=True)
            os.makedirs(output_tg_dir, exist_ok=True)
        except OSError as e:
            print(f"Error creating output subdirs for {singer_id}: {e}")
            skipped_dirs += 1
            continue
        try:
            files_in_song = sorted([f for f in os.listdir(source_song_path) if f.lower().endswith(".wav")])
        except OSError as e:
            print(f"Error reading files in {source_song_path}: {e}")
            skipped_dirs += 1
            continue

        processed_song_flag = False
        for filename in files_in_song:
            # Only purely numeric segment names (e.g. "0001.wav") are processed.
            base_name = os.path.splitext(filename)[0]; segment_id = base_name
            if not segment_id.isdigit(): continue
            wav_filename = filename; tg_filename = f"{base_name}.TextGrid"
            source_wav_path = os.path.join(source_song_path, wav_filename)
            source_tg_path = os.path.join(source_song_path, tg_filename)
            # Skip wavs that have no matching alignment file.
            if not os.path.exists(source_tg_path): continue

            new_base_filename = f"{segment_id}_{song_name_pinyin}"
            output_wav_path = os.path.join(output_wav_dir, f"{new_base_filename}.wav")
            output_tg_path = os.path.join(output_tg_dir, f"{new_base_filename}.TextGrid")

            processed_tg = process_textgrid_file(source_tg_path, pinyin_phoneme_map, TIME_TOLERANCE, INITIAL_PHONE_SPLIT_FRACTION)

            if processed_tg:
                try:
                    shutil.copy2(source_wav_path, output_wav_path)
                except Exception as e:
                    print(f" Error copying WAV {wav_filename}: {e}")
                    continue
                try:
                    processed_tg.write(output_tg_path)
                    processed_files_count += 1; processed_song_flag = True
                except Exception as e:
                    print(f" Error writing TG to {output_tg_path}: {e}")
                    # Roll back the copied wav so no orphan files remain.
                    if os.path.exists(output_wav_path):
                        # BUGFIX: was `except OSError as re:` — that shadowed
                        # and (per except-clause semantics) deleted the module
                        # level `re` import, breaking later regex calls.
                        try:
                            os.remove(output_wav_path)
                        except OSError as rm_err:
                            print(f" Error removing {output_wav_path}: {rm_err}")

        if processed_song_flag: processed_songs += 1

    print(f"\n--- Processing Summary ---")
    print(f"Total directories found: {total_dirs}")
    print(f"Directories processed (at least one file): {processed_songs}")
    print(f"Directories skipped (format error, read error, etc.): {skipped_dirs}")
    print(f"Total TextGrid/WAV pairs successfully processed: {processed_files_count}")
    print(f"Output data saved in: {OUTPUT_DATASET_DIR}")
    print("--- Done ---")