import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download, login

# Destination dataset repository on the Hub; update this if needed
DATASET = "severo/notebooks_on_the_hub"


def write_table(table, filename):
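    """Upload `table` to the Hub once per combination of row-group size
    (128k / 1m rows), content-defined chunking (cdc / no_cdc), and
    compression codec (none / snappy)."""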
    for row_group_size in ["128k", "1m"]:
        for use_content_defined_chunking in ["cdc", "no_cdc"]:
            for compression in ["none", "snappy"]:
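                # hf:// paths are written through the Hugging Face Hub
                # filesystem integration provided by huggingface_hub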
                path = f"hf://datasets/{DATASET}/{row_group_size}/{use_content_defined_chunking}/{compression}/{filename}"
                print(f"\nTrying to write to {path}")
                pq.write_table(
                    table,
                    path,
                    compression=compression,
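                    # enable Parquet content-defined chunking for the "cdc"
                    # variant (requires a PyArrow release that supports it)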
                    use_content_defined_chunking=use_content_defined_chunking == "cdc",
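                    # include the Parquet page index so readers can prune pages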
                    write_page_index=True,
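                    # cap row groups at 128Ki or 1Mi rows, per the variant name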
                    row_group_size=128 * 1024
                    if row_group_size == "128k"
                    else 1024 * 1024,
                )


def main():
    print("Start generation of the parquet files.")

    # always prompt for a Hugging Face token, since we write to the Hub
    # (TODO: make this more convenient)
    login()

    for filename in [
        "2023_05.parquet",
        "2023_06.parquet",
        "2023_07.parquet",
        "2023_08.parquet",
        "2023_09.parquet",
        "2023_10.parquet",
        "2023_11.parquet",
        "2023_12.parquet",
        "2024_01.parquet",
        "2024_02.parquet",
        "2024_03.parquet",
        "2024_04.parquet",
        "2024_05.parquet",
    ]:
        print(f"\n\nProcessing {filename}")

        # download the file from the Hugging Face Hub into the local cache
        path = hf_hub_download(
            repo_id="severo/notebooks_on_the_hub",
            filename=f"original/{filename}",
            repo_type="dataset",
        )

        # read the cached parquet file into a PyArrow table
        table = pq.read_table(path)

        write_table(table, filename)

    print("\n\nDone!")


if __name__ == "__main__":
    main()