import json
from typing import Dict, List, Optional, Tuple

import pandas as pd


def parse_identifier(
    identifier_str: str, defaults: Dict[str, str], parts_sep: str = ",", key_val_sep: str = "="
) -> Dict[str, str]:
    # Parse an identifier string of "key=value" pairs into a dict, filling in
    # keys that are missing from the string with the supplied defaults.
    parts = [
        # split on the first key/value separator only, so values may contain it as well
        part.split(key_val_sep, 1)
        for part in identifier_str.strip().split(parts_sep)
        if key_val_sep in part
    ]
    parts_dict = dict(parts)
    return {**defaults, **parts_dict}
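
# Usage sketch for parse_identifier (illustrative values only):
#   parse_identifier("model=bert,seed=1", {"split": "test"})
#   -> {"split": "test", "model": "bert", "seed": "1"}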


def read_nested_json(path: str) -> pd.DataFrame:
    # Read a nested JSON file and flatten it into a pandas DataFrame; nested keys
    # are joined with "/" so they can later be split into column index levels.
    with open(path, "r") as f:
        data = json.load(f)
    result = pd.json_normalize(data, sep="/")
    result.index.name = "entry"
    return result
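
# For a file containing {"a": {"b": 1, "c": 2}} (hypothetical contents), read_nested_json
# returns a one-row DataFrame with the flattened columns "a/b" and "a/c".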


def read_nested_jsons(
    json_paths: List[Tuple[str, str]],
    default_key_values: Optional[Dict[str, str]] = None,
    column_level_names: Optional[List[str]] = None,
) -> pd.DataFrame:
    # Read several nested JSON files, given as (identifier string, path) pairs, and
    # combine them into a single DataFrame whose outer row-index levels come from the
    # parsed identifiers and whose column levels come from the nested JSON keys.
    dfs = [read_nested_json(json_path) for _, json_path in json_paths]
    new_index_levels = pd.MultiIndex.from_frame(
        pd.DataFrame(
            [
                parse_identifier(identifier_str, default_key_values or {})
                for identifier_str, _ in json_paths
            ]
        )
    )
    if len(set(new_index_levels)) == len(new_index_levels):
        # all identifiers are distinct: simply stack the dataframes along the rows
        dfs_concat = pd.concat(
            dfs, keys=list(new_index_levels), names=new_index_levels.names, axis=0
        )
    else:
        # some identifiers occur more than once: dataframes sharing an identifier are
        # expected to contribute disjoint columns and are first combined along the
        # columns, then the combined frames are stacked along the rows
        dfs_new = []
        ids_unique = []
        for identifier in new_index_levels:
            if identifier not in ids_unique:
                ids_unique.append(identifier)
        # first combine the dataframes with the same identifier along the columns
        for identifier in ids_unique:
            dfs_with_id = [df for df, idx in zip(dfs, new_index_levels) if idx == identifier]
            # the column-wise concatenation is only well defined if no column name
            # appears in more than one of the dataframes
            if len(set(col for df in dfs_with_id for col in df.columns)) != sum(
                len(df.columns) for df in dfs_with_id
            ):
                raise ValueError(
                    "There are duplicate columns across the dataframes with the same identifier."
                )
            dfs_id_concat = pd.concat(dfs_with_id, axis=1)
            dfs_new.append(dfs_id_concat)
        dfs_concat = pd.concat(dfs_new, keys=ids_unique, names=new_index_levels.names, axis=0)
    # split the "/"-joined column names produced by read_nested_json into a column MultiIndex
    dfs_concat.columns = pd.MultiIndex.from_tuples(
        [col.split("/") for col in dfs_concat.columns], names=column_level_names
    )
    return dfs_concat
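

if __name__ == "__main__":
    # Minimal usage sketch with made-up data: the file contents, identifier strings
    # and level names below are illustrative only, not part of the module itself.
    import os
    import tempfile

    records = {
        "run_a": {"metrics": {"loss": 0.5, "acc": 0.8}},
        "run_b": {"metrics": {"loss": 0.4, "acc": 0.9}},
    }
    with tempfile.TemporaryDirectory() as tmp_dir:
        json_paths = []
        for name, record in records.items():
            path = os.path.join(tmp_dir, f"{name}.json")
            with open(path, "w") as f:
                json.dump(record, f)
            json_paths.append((f"run={name}", path))
        # rows are indexed by the parsed identifier ("run") plus the original "entry" index;
        # columns become a two-level MultiIndex built from the nested JSON keys
        df = read_nested_jsons(
            json_paths,
            default_key_values={"run": "unknown"},
            column_level_names=["group", "metric"],
        )
        print(df)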