|
""" |
|
- Used in RPv2 exploration, including: |
|
- Plot certain quality signals |
|
- Get doc/char counts (e.g. after filtering) |
|
- Store minhashes of filtered documents for further dedup |
|
""" |
|
|
|
import copy
import gzip
import multiprocessing
import os
import pathlib
import random

import matplotlib.pyplot as plt
import numpy as np
import orjson
import pandas as pd
import pyarrow
import seaborn as sns
from tqdm import tqdm

from rules.rules import gopher_rules_pass
|
|
|
ROOT_PATH = "/home1/BharatGPT_Data/RedPajamaV2" |
|
DATA_ROOT_PATH = "/home1/BharatGPT_Data/RedPajamaV2/data" |
|
PLOTS_ROOT_PATH = "/home1/BharatGPT_Data/RedPajamaV2/plots" |
|
SNAPSHOT = "2023-14" |
|
LANGUAGE = "en" |
|
PARTITION_KEY = "head" |
|
SIGNALS_DIR = os.path.join(DATA_ROOT_PATH, "quality_signals", SNAPSHOT) |
|
DUPLICATES_DIR = os.path.join(DATA_ROOT_PATH, "duplicates", SNAPSHOT) |
|
MINHASH_DIR = os.path.join(DATA_ROOT_PATH, "minhash", SNAPSHOT) |
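# Layout assumed by the path construction below:
#   <DATA_ROOT>/<component>/<snapshot>/<shard>/<lang>_<partition>.<ext>
# where <component> is quality_signals, duplicates, or minhash.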
|
NUM_CORES = 60 |
|
SEED = 2024 |
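# Run modes, checked in this priority order inside process_shard:
#   COUNT_ONLY    - accumulate document/character counts only
#   STORE_SIGNALS - collect per-document quality-signal values (required for DO_PLOT)
#   OUTPUT_FILES  - write per-shard parquet files holding the minhashes of kept documents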
|
DO_PLOT = False |
|
COUNT_ONLY = True |
|
STORE_SIGNALS = False |
|
OUTPUT_FILES = False |
|
|
|
|
|
NUM_SHARDS_PROCESSED = 100  # only used to name PLOTS_DIR below; overridden to 5000 further down
|
if DO_PLOT: |
|
PLOTS_DIR = os.path.join(PLOTS_ROOT_PATH, PARTITION_KEY, f"random_{NUM_SHARDS_PROCESSED}_gopher", "quality_rep_log") |
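# Reference totals, presumably recorded from earlier COUNT_ONLY runs; they are not
# used anywhere else in this script.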
|
NUM_DOCS_HEAD = 2533743 |
|
NUM_DOCS_MIDDLE = 3722022 |
|
NUM_CHARS_HEAD = 12932890455 |
|
NUM_CHARS_MIDDLE = 17883781733 |
|
|
|
NUM_SHARDS_PROCESSED = 5000 |
|
if OUTPUT_FILES: |
|
    OUT_DIR = os.path.join(DATA_ROOT_PATH, "minhash_filtered", SNAPSHOT)
|
|
|
random.seed(SEED) |
|
np.random.seed(SEED) |
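# Sanity checks: the signals and duplicates trees must list the same shards, and
# plotting is only meaningful when signal values are actually stored.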
|
|
|
assert sorted(os.listdir(SIGNALS_DIR)) == sorted(os.listdir(DUPLICATES_DIR)) |
|
assert (not DO_PLOT) or STORE_SIGNALS, "DO_PLOT requires STORE_SIGNALS (plotting needs the collected signal values)"
|
|
|
if COUNT_ONLY: |
|
    assert not DO_PLOT, "Plotting requires storing signal values"
|
all_shards_counts = { |
|
"doc_count": 0, |
|
"char_count": 0 |
|
} |
|
elif STORE_SIGNALS: |
|
all_shards_signals = { |
|
"ccnet_perplexity": [], |
|
|
|
|
|
|
|
|
|
"ccnet_length": [], |
|
"rps_doc_stop_word_fraction": [], |
|
"rps_doc_lorem_ipsum": [] |
|
} |
|
    all_shards_signals_empty = copy.deepcopy(all_shards_signals)


partitions_dict = {
|
"head": ["head"], |
|
"middle": ["middle"], |
|
"head_middle": ["head", "middle"], |
|
} |
|
|
|
for partition in partitions_dict[PARTITION_KEY]: |
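    # process_shard is (re)defined on each pass through the partition loop so that
    # the file paths below always refer to the current partition.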
|
def process_shard(shard): |
|
|
|
|
|
if OUTPUT_FILES: |
|
out_path_shard = os.path.join(OUT_DIR, shard) |
|
out_file_path = os.path.join(out_path_shard, f"{LANGUAGE}_{partition}_filtered.minhash.parquet") |
|
            if os.path.exists(out_file_path):
                raise Exception(f"ERROR: output file already present: {out_file_path}")
|
minhash_path = os.path.join(MINHASH_DIR, shard, f"{LANGUAGE}_{partition}.minhash.parquet") |
|
try: |
|
df = pd.read_parquet(minhash_path) |
|
        except pyarrow.lib.ArrowInvalid:
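            # pyarrow raises ArrowInvalid for empty/unreadable parquet files; such shards
            # are skipped entirely, in every run mode.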
|
|
|
print(f"ERROR with shard {shard}: empty minhash file") |
|
return [] |
|
|
|
signals_path = os.path.join(SIGNALS_DIR, shard, f"{LANGUAGE}_{partition}.signals.json.gz") |
|
try: |
|
duplicates_path = os.path.join(DUPLICATES_DIR, shard, f"{LANGUAGE}_{partition}.duplicates.parquet") |
|
shard_dups = pd.read_parquet(duplicates_path, columns=["doc_id"]) |
|
shard_dups_set = set(shard_dups["doc_id"].tolist()) |
|
        except pyarrow.lib.ArrowInvalid:
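            # An empty or unreadable duplicates file is treated as "no exact duplicates in this shard"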
|
|
|
shard_dups_set = set() |
|
|
|
results = None |
|
if COUNT_ONLY: |
|
results = { |
|
"doc_count": 0, |
|
"char_count": 0 |
|
} |
|
elif STORE_SIGNALS: |
|
results = copy.deepcopy(all_shards_signals_empty) |
|
elif OUTPUT_FILES: |
|
results = [] |
|
idx = -1 |
|
|
|
with gzip.open(signals_path, 'r') as signals_file: |
|
for line in signals_file: |
|
idx += 1 |
|
signals_dict = orjson.loads(line) |
|
if signals_dict["id"] not in shard_dups_set and gopher_rules_pass(signals_dict): |
|
""" |
|
Note about exact duplicates: |
|
https://github.com/togethercomputer/RedPajama-Data/issues/84#issuecomment-1840299911 |
|
One copy remains with this method |
|
""" |
|
if COUNT_ONLY: |
|
results["doc_count"] += 1 |
|
results["char_count"] += signals_dict["quality_signals"]["ccnet_length"][0][2] |
|
elif STORE_SIGNALS: |
|
for k in list(results.keys()): |
|
assert len(signals_dict["quality_signals"][k]) == 1 |
|
assert len(signals_dict["quality_signals"][k][0]) == 3 |
|
results[k].append(signals_dict["quality_signals"][k][0][2]) |
|
elif OUTPUT_FILES: |
|
results.append(idx) |
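        # In OUTPUT_FILES mode, `results` holds the line indices of the kept documents;
        # this assumes the rows of the minhash parquet are aligned 1:1 with the lines of
        # the signals file for the same shard/partition.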
|
|
|
if OUTPUT_FILES: |
|
df = df.iloc[results, :] |
|
pathlib.Path(out_path_shard).mkdir(parents=True, exist_ok=True) |
|
            df.to_parquet(out_file_path)
|
|
|
return results |
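    # Shard sampling is reproducible: the directory listing is sorted before random.sample
    # and the RNG is seeded above. The Pool relies on the fork start method (the Linux
    # default), since there is no `if __name__ == "__main__":` guard in this script.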
|
|
|
with multiprocessing.Pool(NUM_CORES) as pool: |
|
shards_list = os.listdir(SIGNALS_DIR) |
|
all_results = list(tqdm(pool.imap(process_shard, random.sample(sorted(shards_list), k=NUM_SHARDS_PROCESSED)), total=NUM_SHARDS_PROCESSED)) |
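    # Merge per-shard results: counts are summed, stored signal lists are concatenated.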
|
|
|
for results in all_results: |
|
if COUNT_ONLY: |
|
for k in list(all_shards_counts.keys()): |
|
all_shards_counts[k] += results[k] |
|
elif STORE_SIGNALS: |
|
for k in list(all_shards_signals.keys()): |
|
all_shards_signals[k].extend(results[k]) |
|
|
|
|
|
if COUNT_ONLY: |
|
print(all_shards_counts) |
|
|
|
if DO_PLOT: |
|
pathlib.Path(PLOTS_DIR).mkdir(parents=True, exist_ok=True) |
|
for k, v in all_shards_signals.items(): |
|
sns.histplot(x=v, bins=100, log_scale=True) |
|
plt.savefig(os.path.join(PLOTS_DIR, f"{k}.png")) |
|
plt.close() |
|