File size: 3,322 Bytes
8fcf91d
5304b62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8fcf91d
 
5304b62
8fcf91d
 
 
 
 
5304b62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8fcf91d
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
from multiprocessing import Pool
from typing import List, Literal

import numpy as np
import torchaudio
from datasets import concatenate_datasets, load_dataset
from pandas import concat, read_csv
from tqdm import tqdm


def get_audio_length(file_path: str) -> float:
    """
    Return the total duration of the recording at *file_path*, in seconds.
    """
    meta = torchaudio.info(file_path)
    frames, rate = meta.num_frames, meta.sample_rate
    return frames / rate


def get_info(subsets: List[str] | str) -> None:
    """
    Print audio and transcription statistics for one or more dataset subsets.

    Reports total/min/max/average/median clip duration, total/average/median
    transcription length, unique-character coverage, and average speech rate
    (characters per second).

    Parameters
    ----------
    subsets:
        A single subset directory name under ``./opus/`` (e.g.
        'saamgwokjinji'), or a list of names whose statistics are combined.
        (The previous ``Literal`` annotation was too narrow — callers also
        pass 'mouzaakdung' and 'lukdinggei'.)
    """
    # Normalise the single-name convenience form to a list.
    if not isinstance(subsets, list):
        subsets = [subsets]

    durations = _collect_durations(subsets)
    total_duration = sum(durations)

    print(f"Statistics for: {' & '.join(subsets)}")
    print(f"Total audio duration: {total_duration / 3600:.2f} hours")
    print(f"Total audio duration: {total_duration / 60:.2f} minutes")
    print(f"Minimum audio duration: {min(durations):.3f} seconds")
    print(f"Maximum audio duration: {max(durations):.3f} seconds")
    print(f"Average audio duration: {np.mean(durations):.3f} seconds")
    print(f"Median audio duration: {np.median(durations):.3f} seconds")

    metadata = _collect_metadata(subsets)
    # Compute per-clip transcription lengths once instead of three times.
    lengths = metadata['transcription'].str.len()
    total_characters = lengths.sum()
    unique_characters = set(''.join(metadata['transcription']))

    print(f"Total characters: {total_characters}")
    print(f"Average characters per clip: {lengths.mean():.2f}")
    print(f"Median characters per transcription: {lengths.median()}")
    print(f"Number of unique characters: {len(unique_characters)}")
    print(f"Average speech rate: {total_characters / total_duration:.2f} characters per second")
    print(f"Unique characters: {''.join(sorted(unique_characters))}")
    print("-" * 20)


def _collect_durations(subsets: List[str]) -> List[float]:
    """Load each subset's audio folder and return every clip's duration in seconds."""
    parts = [
        load_dataset('audiofolder', data_dir=f'./opus/{subset}', split='train')
        for subset in subsets
    ]
    dataset = concatenate_datasets(parts) if len(parts) > 1 else parts[0]
    file_paths = [item['audio']['path'] for item in dataset]
    # Many small per-file probes: a process pool spreads them across cores
    # while tqdm tracks progress.
    with Pool() as pool:
        return list(tqdm(pool.imap(get_audio_length, file_paths), total=len(file_paths)))


def _collect_metadata(subsets: List[str]):
    """Read and concatenate every subset's metadata.csv into a single DataFrame."""
    frames = [read_csv(f'./opus/{subset}/metadata.csv') for subset in subsets]
    return concat(frames) if len(frames) > 1 else frames[0]


if __name__ == '__main__':
    # Report statistics for a single subset. Any of the subset names —
    # saamgwokjinji, seoiwuzyun, mouzaakdung, lukdinggei — may be passed
    # instead, or a list of names to combine their statistics.
    get_info('lukdinggei')