import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download, login

DATASET = "severo/notebooks_on_the_hub"
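

# write_table uploads eight variants of each file: two row-group sizes
# (128Ki and 1Mi rows), content-defined chunking enabled or disabled, and
# two compression codecs ("none" and "snappy"). Note: the
# use_content_defined_chunking option requires a recent pyarrow release
# with Parquet CDC support.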
def write_table(table, filename):
    for row_group_size in ["128k", "1m"]:
        for use_content_defined_chunking in ["cdc", "no_cdc"]:
            for compression in ["none", "snappy"]:
                path = f"hf://datasets/{DATASET}/{row_group_size}/{use_content_defined_chunking}/{compression}/{filename}"
                print(f"\nTrying to write to {path}")
                pq.write_table(
                    table,
                    path,
                    compression=compression,
                    use_content_defined_chunking=use_content_defined_chunking == "cdc",
                    write_page_index=True,
                    row_group_size=128 * 1024 if row_group_size == "128k" else 1024 * 1024,
                )
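

# main authenticates, downloads each original monthly file from the Hub,
# and rewrites it in the eight variants described above.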
def main():
    print("Starting generation of the Parquet files.")

    # Authenticate with a Hugging Face token that has write access to the repo.
    login()

    for filename in [
        "2023_05.parquet",
        "2023_06.parquet",
        "2023_07.parquet",
        "2023_08.parquet",
        "2023_09.parquet",
        "2023_10.parquet",
        "2023_11.parquet",
        "2023_12.parquet",
        "2024_01.parquet",
        "2024_02.parquet",
        "2024_03.parquet",
        "2024_04.parquet",
        "2024_05.parquet",
    ]:
        print(f"\n\nProcessing {filename}")

        # Download the original monthly file from the Hub (cached locally).
        path = hf_hub_download(
            repo_id=DATASET,
            filename=f"original/{filename}",
            repo_type="dataset",
        )

        table = pq.read_table(path)

        write_table(table, filename)

    print("\n\nDone!")


if __name__ == "__main__":
    main()