#!/usr/bin/env python3
"""
Run this script as ./conversion_script.py to convert the SCITE corpus files
(train-corpus.xml and test-corpus.xml) into HF-compatible parquet files for the
causality-detection, causal-candidate-extraction and causality-identification tasks.
"""

import re
from typing import Literal

import pandas as pd

Split = Literal["train", "test"]


def convert_for_causality_detection(split: Split) -> None:
    # Sentence-level binary classification: 0 for "Non-Causal", 1 for causal sentences.
    df = pd.read_xml(f"{split}-corpus.xml")
    df["label"] = df["label"].apply(lambda x: 0 if x == "Non-Causal" else 1)
    # Strip the <e1>/<e2>/... entity markers so only plain text remains.
    df["text"] = df["sentence"].apply(lambda x: re.sub(r'</?e\d+>', "", x))
    df["index"] = df["id"].apply(lambda x: f"scite_{split}_{x}")
    df = df.set_index("index")
    df = df[["label", "text"]]
    df.to_parquet(f"./causality-detection/{split}.parquet", engine="pyarrow")


def convert_for_causal_candidate_extraction(split: Split) -> None:
    def map_to_tokens(text: str) -> pd.Series:
        # Split the sentence at every <eN>/</eN> marker and record, for each
        # resulting text segment, which entity ids are open at that point.
        splits: list[str] = []
        tags: list[list[int]] = []
        curtags: set[int] = set()
        pos = 0
        for match in re.finditer(r"(.*?)<(/?)e(\d+)>", text):
            splits.append(match[1])
            tags.append(list(curtags))
            if match[2] == "":
                curtags.add(int(match[3]))
            else:
                curtags.remove(int(match[3]))
            pos = match.end()
        # Keep any trailing text after the last entity marker.
        if text[pos:]:
            splits.append(text[pos:])
            tags.append(list(curtags))
        return pd.Series((splits, tags))

    df = pd.read_xml(f"{split}-corpus.xml")
    df[["tokens", "entity"]] = df["sentence"].apply(map_to_tokens)
    df["index"] = df["id"].apply(lambda x: f"scite_{split}_{x}")
    df = df[["index", "tokens", "entity"]].set_index("index")
    print(df)
    df.to_parquet(f"./causal-candidate-extraction/{split}.parquet", engine="pyarrow")


def convert_for_causality_identification(split: Split) -> None:
    def map_label(label: str) -> list[dict]:
        # "Non-Causal" sentences carry no relations.
        if label == "Non-Causal":
            return []
        # Causal labels are of the form "Cause-Effect(...)"; collect one
        # relation dict per (first, second) pair inside the parentheses.
        tmp = []
        for t in label[len("Cause-Effect("):-1].split("),("):
            left, right = t.strip('()').split(',')
            tmp.append({"relationship": 1, "first": left, "second": right})
        return tmp

    df = pd.read_xml(f"{split}-corpus.xml", dtype_backend="pyarrow")
    df["relations"] = df["label"].apply(map_label)
    # Keep the sentence text as-is (entity markers included).
    df["text"] = df["sentence"]
    df["index"] = df["id"].apply(lambda x: f"scite_{split}_{x}")
    df = df.set_index("index")
    df = df[["text", "relations"]]
    df.to_parquet(f"./causality-identification/{split}.parquet", engine="pyarrow")


if __name__ == "__main__":
    convert_for_causality_detection("test")
    convert_for_causality_detection("train")
    convert_for_causal_candidate_extraction("test")
    convert_for_causal_candidate_extraction("train")
    convert_for_causality_identification("test")
    convert_for_causality_identification("train")