Datasets:

Modalities:
Tabular
Text
Formats:
parquet
ArXiv:
Libraries:
Datasets
pandas
License:
File size: 2,330 Bytes
1be89f3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import os

import click
import polars as pl


@click.command()
@click.option(
    "--src_dir",
    type=click.Path(exists=True, file_okay=False),
    required=True,
    help="Path to the directory containing source Parquet files, e.g., './50m'.",
)
@click.option(
    "--dst_dir",
    type=click.Path(file_okay=False),
    required=False,
    help="Path to the directory where Parquet files will be saved. "
    "If not specified, Parquet files are saved in 'src_dir' with prefix 'seq_'.",
)
@click.option(
    "--files",
    type=str,
    multiple=True,
    help="List of parquet filenames to convert. If not specified, all Parquet files in 'src_dir' will be converted. "
    "For example, '--files dislike.parquet --files like.parquet'.",
)
@click.option(
    "--aggregation",
    type=click.Choice(["structs", "columns"]),
    required=True,
    help="Agg method: 'structs' for a sequence of structs per 'uid', 'columns' for individual column aggregation.",
)
def cli(src_dir: str, dst_dir: str | None, files: tuple[str, ...], aggregation: str):
    """CLI entry point: convert flat Parquet event files to per-uid sequential form.

    Thin wrapper around transform2sequential; option semantics are described
    in the --help strings above. Annotations reflect what click actually
    passes: with multiple=True, ``files`` arrives as a tuple, and ``dst_dir``
    is ``None`` when the option is omitted.
    """
    transform2sequential(src_dir, dst_dir, files, aggregation)


def transform2sequential(src_dir: str, dst_dir: str, files: list[str], aggregation: str):
    for file in files:
        print(f"Processing file: {file}")

        src_path = os.path.join(src_dir, file)

        parquet_path = os.path.join(dst_dir, file)

        if os.path.exists(parquet_path):
            parquet_path = os.path.join(dst_dir, f"{aggregation}_" + file)
            assert not os.path.exists(parquet_path)

        os.makedirs(dst_dir, exist_ok=True)

        df = pl.scan_parquet(src_path)

        if aggregation == "structs":
            seq_df = (
                df.select("uid", pl.struct(pl.all().exclude("uid")).alias("events"))
                .group_by("uid", maintain_order=True)
                .agg(pl.col("events"))
            )
            seq_df.sink_parquet(
                parquet_path,
                compression="lz4",
                statistics=True,
            )

        elif aggregation == "columns":
            col_agg_df = df.group_by("uid", maintain_order=True).agg(pl.all().exclude("uid"))
            col_agg_df.sink_parquet(
                parquet_path,
                compression="lz4",
                statistics=True,
            )


if __name__ == "__main__":
    # Run the click command only when executed as a script, not on import.
    cli()