"""
Clean a FireProtDB 2.0 CSV into an ML-ready table with some reformatting.

Outputs:
- A canonical row-per-experiment table with parsed mutation fields and normalized columns.
- Optionally writes Parquet for speed.

Usage:
    python 01_process_csv.py \
        --input ../data/fireprotdb_20251015_16.csv \
        --output ../data/fireprotdb_clean.parquet

Notes:
- This script is conservative: it does NOT impute missing ddg/dtm values.
- It standardizes a few categorical fields; extend the mappings as needed.
"""
|
|
from __future__ import annotations

import argparse
import math
import re
from typing import Dict, Optional, Tuple

import pandas as pd
| |
_PDB_SPLIT = re.compile(r"[;,| ]+")
_PDB_ID = re.compile(r"^[0-9][A-Za-z0-9]{3}$")
# Chain/entity suffixes such as "1abc_A" or "1abc:B" are trimmed before validation.
_PDB_SUFFIX = re.compile(r"[:_]")


def parse_pdb_ids(x: object) -> Tuple[Optional[str], list]:
    """
    Parse a raw wwPDB cell into canonical PDB identifiers.

    Returns (pdb_id, pdb_ids) where:
      - pdb_id: first valid 4-char PDB id (lowercase), or None
      - pdb_ids: sorted unique list of valid ids (lowercase)

    Non-string / empty input yields (None, []).
    """
    if not isinstance(x, str):
        return None, []
    raw = x.strip()
    if not raw:
        return None, []

    ids = set()
    for token in _PDB_SPLIT.split(raw):
        # Drop chain/entity suffixes ("1abc_A" -> "1abc"); empty tokens from
        # leading/trailing separators simply fail the id regex below.
        candidate = _PDB_SUFFIX.split(token.strip())[0].strip()
        if _PDB_ID.match(candidate):
            ids.add(candidate.lower())

    ordered = sorted(ids)
    return (ordered[0] if ordered else None), ordered
|
|
| |
| |
| |
| |
| |
# Accepted point-mutation spellings: "A123V" and "123A>V".
_MUT_A123V = re.compile(r"^(?P<wt>[ACDEFGHIKLMNPQRSTVWY])(?P<pos>\d+)(?P<mut>[ACDEFGHIKLMNPQRSTVWY])$")
_MUT_123A_GT_V = re.compile(r"^(?P<pos>\d+)(?P<wt>[ACDEFGHIKLMNPQRSTVWY])>(?P<mut>[ACDEFGHIKLMNPQRSTVWY])$")


def parse_substitution(s: str) -> Tuple[Optional[str], Optional[int], Optional[str], Optional[str]]:
    """
    Parse a point-mutation string.

    Returns (wt_residue, position, mut_residue, normalized_mutation_string),
    or (None, None, None, None) when the input is not a recognized format.
    """
    if not isinstance(s, str) or not s.strip():
        return None, None, None, None
    text = s.strip()

    # Both spellings expose identical named groups, so one loop covers them.
    for pattern in (_MUT_A123V, _MUT_123A_GT_V):
        match = pattern.match(text)
        if match:
            wt = match.group("wt")
            pos = int(match.group("pos"))
            mut = match.group("mut")
            return wt, pos, mut, f"{wt}{pos}{mut}"

    return None, None, None, None
|
|
|
|
| |
def norm_str(x: object) -> Optional[str]:
    """Strip a string; return None for non-strings and for empty results."""
    if isinstance(x, str):
        stripped = x.strip()
        if stripped:
            return stripped
    return None
|
|
|
|
# Canonical spellings for a few categorical fields; keys are lowercased,
# stripped variants as they appear in the raw CSV.
BUFFER_MAP: Dict[str, str] = {
    "sodium tetraborate": "Sodium tetraborate",
    "tetra-borate": "Sodium tetraborate",
    "tetraborate": "Sodium tetraborate",
    "sodium phosphate": "Sodium phosphate",
}

METHOD_MAP: Dict[str, str] = {
    "dsc": "DSC",
    "cd": "CD",
}

MEASURE_MAP: Dict[str, str] = {
    "thermal": "Thermal",
}


def normalize_categoricals(df: pd.DataFrame) -> pd.DataFrame:
    """
    Add *_norm columns for the BUFFER/METHOD/MEASURE source columns.

    Values are matched case-insensitively against the mapping tables;
    unmapped values fall back to their stripped original spelling.  A
    missing source column produces an all-NA output column.  Mutates and
    returns *df*.
    """
    def _canonicalize(series: pd.Series, mapping: Dict[str, str]) -> pd.Series:
        as_string = series.astype("string")
        canonical = as_string.str.lower().str.strip().map(mapping)
        return canonical.fillna(as_string.str.strip())

    for source, target, mapping in (
        ("BUFFER", "buffer_norm", BUFFER_MAP),
        ("METHOD", "method_norm", METHOD_MAP),
        ("MEASURE", "measure_norm", MEASURE_MAP),
    ):
        if source in df.columns:
            df[target] = _canonicalize(df[source], mapping)
        else:
            df[target] = pd.NA

    return df
|
|
|
|
| |
def to_float(x: object) -> Optional[float]:
    """
    Best-effort conversion to float.

    Returns None for None/NaN, empty strings, and anything unparseable.
    For strings that are not plain numbers, falls back to extracting the
    first numeric token (e.g. "5.2 kcal/mol" -> 5.2).
    """
    if x is None or (isinstance(x, float) and math.isnan(x)):
        return None
    if isinstance(x, (int, float)):
        return float(x)
    if not isinstance(x, str):
        return None

    text = x.strip()
    if not text:
        return None
    try:
        return float(text)
    except ValueError:
        pass

    # Salvage the first number embedded in free text.
    found = re.search(r"[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?", text)
    if found is None:
        return None
    try:
        return float(found.group(0))
    except ValueError:
        return None
|
|
|
|
def clean_numeric_columns(df: pd.DataFrame) -> pd.DataFrame:
    """
    Add lowercase, float-parsed copies of the numeric source columns.

    Each uppercase source column gets a lowercase counterpart parsed with
    to_float; a missing source column produces an all-NA output column.
    Mutates and returns *df*.
    """
    # Thermodynamic and experimental columns all get the same treatment:
    # keep the raw column, add a lowercase parsed copy.
    for source in ["DDG", "DOMAINOME_DDG", "DG", "DH", "DHVH",
                   "TM", "DTM", "EXP_TEMPERATURE"]:
        target = source.lower()
        if source in df.columns:
            df[target] = df[source].map(to_float)
        else:
            df[target] = pd.NA

    # Domainome fitness keeps a shorter output name.
    if "DOMAINOME_FITNESS" in df.columns:
        df["fitness"] = df["DOMAINOME_FITNESS"].map(to_float)
    else:
        df["fitness"] = pd.NA

    if "PH" in df.columns:
        df["ph"] = df["PH"].map(to_float)
    else:
        df["ph"] = pd.NA

    return df
|
|
|
|
def derive_labels(df: pd.DataFrame) -> pd.DataFrame:
    """
    Derive a boolean-ish "stabilizing" label per row.

    Priority: an explicit STABILIZING yes/no column wins; otherwise the sign
    of ddg is used (negative -> stabilizing, positive -> destabilizing,
    zero/NaN/missing -> NA).  Mutates and returns *df*.
    """
    # Explicit curator-provided label, when present.
    if "STABILIZING" in df.columns:
        flags = df["STABILIZING"].astype("string").str.lower().str.strip()
        df["stabilizing_explicit"] = flags.map({"yes": True, "no": False})
    else:
        df["stabilizing_explicit"] = pd.NA

    def _from_sign(v):
        # Sign convention here: ddg < 0 means stabilizing.
        if isinstance(v, float):
            if v < 0:
                return True
            if v > 0:
                return False
        return pd.NA

    df["stabilizing_ddg"] = df["ddg"].apply(_from_sign)

    # Merge: explicit label first, ddg-derived label as fallback.
    df["stabilizing"] = df["stabilizing_explicit"]
    missing = df["stabilizing"].isna()
    df.loc[missing, "stabilizing"] = df.loc[missing, "stabilizing_ddg"]

    return df
|
|
|
|
def select_and_rename(df: pd.DataFrame) -> pd.DataFrame:
    """
    Project *df* onto the canonical output schema.

    Source columns are renamed to snake_case; columns absent from *df*
    become all-NA.  Derived columns (ddg, ph, *_norm, stabilizing, pdb ids)
    are carried over under their existing names.
    """
    keep = {
        "EXPERIMENT_ID": "experiment_id",
        "SEQUENCE_ID": "sequence_id",
        "MUTANT_ID": "mutant_id",
        "SOURCE_SEQUENCE_ID": "source_sequence_id",
        "TARGET_SEQUENCE_ID": "target_sequence_id",
        "SEQUENCE_LENGTH": "sequence_length",
        "SUBSTITUTION": "substitution_raw",
        "INSERTION": "insertion_raw",
        "DELETION": "deletion_raw",
        "PROTEIN": "protein_name",
        "ORGANISM": "organism",
        "UNIPROTKB": "uniprotkb",
        "EC_NUMBER": "ec_number",
        "INTERPRO": "interpro",
        "PUBLICATION_PMID": "pmid",
        "PUBLICATION_DOI": "doi",
        "PUBLICATION_YEAR": "publication_year",
        "SOURCE_DATASET": "source_dataset",
        "REFERENCING_DATASET": "referencing_dataset",
        "WWPDB": "wwpdb_raw",
    }

    # BUG FIX: anchor the output on df's index.  With an empty DataFrame,
    # assigning a scalar pd.NA first (when the first kept column is missing)
    # would pin the index to zero rows, and every later series assignment
    # would align to that empty index and silently drop all data.
    out = pd.DataFrame(index=df.index)
    for src, dst in keep.items():
        out[dst] = df[src] if src in df.columns else pd.NA

    # Derived/parsed columns produced earlier in the pipeline.
    extra_cols = [
        "ddg", "domainome_ddg", "dg", "dh", "dhvh",
        "tm", "dtm", "exp_temperature", "fitness",
        "ph",
        "buffer_norm", "method_norm", "measure_norm",
        "stabilizing",
    ]
    for c in extra_cols:
        out[c] = df[c] if c in df.columns else pd.NA

    # Raw experimental-condition columns, kept verbatim under *_raw names.
    for src, dst in [("BUFFER", "buffer_raw"), ("BUFFER_CONC", "buffer_conc_raw"), ("ION", "ion_raw"), ("ION_CONC", "ion_conc_raw"), ("STATE", "state")]:
        out[dst] = df[src] if src in df.columns else pd.NA
    out["pdb_id"] = df["pdb_id"] if "pdb_id" in df.columns else pd.NA
    out["pdb_ids"] = df["pdb_ids"] if "pdb_ids" in df.columns else [[] for _ in range(len(df))]
    return out
|
|
|
|
def main():
    """CLI entry point: read the raw CSV, clean it, and write parquet/csv."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", help="Path to raw FireProtDB 2.0 CSV", default='../data/fireprotdb_20251015-164116.csv')
    ap.add_argument("--output", help="Path to output .parquet or .csv", default='../data/fireprotdb_cleaned.parquet')
    ap.add_argument("--min_seq_len", type=int, default=1, help="Drop sequences shorter than this")
    ap.add_argument("--drop_no_label", action="store_true", help="Drop rows with neither ddg nor dtm")
    args = ap.parse_args()

    # Read everything as strings; numeric parsing is done explicitly below.
    df = pd.read_csv(args.input, dtype="string", keep_default_na=False, na_values=["", "NA", "NaN", "nan"])
    df = df.replace({"": pd.NA})

    # Trim stray whitespace on all string columns up front.
    for c in df.columns:
        if pd.api.types.is_string_dtype(df[c]):
            df[c] = df[c].astype("string").str.strip()

    df = normalize_categoricals(df)
    df = clean_numeric_columns(df)

    # BUG FIX: the column-existence check must happen BEFORE indexing df.
    # The old per-row lambda guarded `"SUBSTITUTION" in df.columns` inside
    # the apply, but df["SUBSTITUTION"] was evaluated first, so a missing
    # column raised KeyError before the guard could ever run.
    if "SUBSTITUTION" in df.columns:
        parsed = df["SUBSTITUTION"].apply(parse_substitution)
    else:
        parsed = pd.Series([(None, None, None, None)] * len(df), index=df.index)
    df["wt_residue"] = parsed.map(lambda t: t[0])
    df["position"] = parsed.map(lambda t: t[1]).astype("Int64")
    df["mut_residue"] = parsed.map(lambda t: t[2])
    df["mutation"] = parsed.map(lambda t: t[3])

    df = derive_labels(df)

    if "WWPDB" in df.columns:
        parsed_pdb = df["WWPDB"].astype("string").fillna("").apply(lambda v: parse_pdb_ids(str(v)))
        df["pdb_id"] = parsed_pdb.map(lambda t: t[0])
        df["pdb_ids"] = parsed_pdb.map(lambda t: t[1])
    else:
        df["pdb_id"] = pd.NA
        df["pdb_ids"] = [[] for _ in range(len(df))]

    # NOTE: rows with missing/unparseable length count as length 0 here, so
    # any min_seq_len >= 1 drops them.
    if "SEQUENCE_LENGTH" in df.columns:
        df["sequence_length_num"] = df["SEQUENCE_LENGTH"].map(to_float)
        df = df[df["sequence_length_num"].fillna(0) >= args.min_seq_len]

    if args.drop_no_label:
        df = df[~(df["ddg"].isna() & df["dtm"].isna())]

    out = select_and_rename(df)

    # Parsed mutation fields ride along with the canonical schema.
    out["wt_residue"] = df["wt_residue"]
    out["position"] = df["position"]
    out["mut_residue"] = df["mut_residue"]
    out["mutation"] = df["mutation"]

    # One row per experiment id.
    if "experiment_id" in out.columns:
        out = out.drop_duplicates(subset=["experiment_id"])

    if args.output.lower().endswith(".parquet"):
        out.to_parquet(args.output, index=False)
    elif args.output.lower().endswith(".csv"):
        out.to_csv(args.output, index=False)
    else:
        raise ValueError("Output must end with .parquet or .csv")

    print(f"Wrote {len(out):,} rows to {args.output}")


if __name__ == "__main__":
    main()
|
|