"""
Utility to get doc and character counts
"""
import os
import gzip
import json
import multiprocessing

import orjson
from tqdm import tqdm


def process_shard_unfiltered(shard):
    """Count every document and its characters in one shard's signals file."""
    # Module-level config, inherited by the worker processes via fork.
    global LANGUAGE
    global PARTITION
    global SIGNALS_DIR
    global SIGNALS_EXTENSION
    result = {
        "doc_count": 0,
        "char_count": 0
    }
    signals_file_path = os.path.join(
        SIGNALS_DIR,
        shard,
        f"{LANGUAGE}_{PARTITION}.{SIGNALS_EXTENSION}"
    )
    with gzip.open(signals_file_path, "r") as signals_file:
        for line in signals_file:
            signals_dict = orjson.loads(line)
            result["doc_count"] += 1
            # ccnet_length holds [start, end, value] spans; the first span's
            # value is the document's character count.
            result["char_count"] += signals_dict["quality_signals"]["ccnet_length"][0][2]
    return result
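

# For reference, a minimal sketch of the assumed shape of one signals line,
# following the RedPajamaV2 quality-signals convention of storing each signal
# as a list of [start, end, value] spans (the concrete id and length values
# below are illustrative):
#
#   {
#       "id": "2023-14/0000/en_head.json.gz/0",
#       "quality_signals": {
#           "ccnet_length": [[0, 1024, 1024.0]],
#           ...
#       }
#   }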


def process_shard_docids(shard):
    """Count documents and characters in one shard, restricted to the ids
    listed in that shard's docids file."""
    global LANGUAGE
    global PARTITION
    global SIGNALS_DIR
    global SIGNALS_EXTENSION
    global DOCIDS_DIR
    global DOCIDS_EXTENSION
    docids_file_path = os.path.join(
        DOCIDS_DIR,
        shard,
        f"{LANGUAGE}_{PARTITION}.{DOCIDS_EXTENSION}"
    )
    with open(docids_file_path) as docids_file:
        docids = json.load(docids_file)
    docids_set = set(docids)
    result = {
        "doc_count": 0,
        "char_count": 0
    }
    signals_file_path = os.path.join(
        SIGNALS_DIR,
        shard,
        f"{LANGUAGE}_{PARTITION}.{SIGNALS_EXTENSION}"
    )
    with gzip.open(signals_file_path, "r") as signals_file:
        for line in signals_file:
            signals_dict = orjson.loads(line)
            if signals_dict["id"] in docids_set:
                result["doc_count"] += 1
                result["char_count"] += signals_dict["quality_signals"]["ccnet_length"][0][2]
    # Every listed docid must have been found in the shard's signals file.
    assert result["doc_count"] == len(docids_set), f"ERROR in counts for {shard=}"
    return result
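

# The docids file is assumed to be a plain JSON array of document ids matching
# the "id" field of the signals lines, e.g. (illustrative values):
#
#   ["2023-14/0000/en_head.json.gz/0", "2023-14/0000/en_head.json.gz/7", ...]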


def process_docids_store(processing_function):
    """Map processing_function over all shards in parallel and sum the counts."""
    with multiprocessing.Pool(NUM_CORES) as pool:
        # Shard names are taken from DOCIDS_DIR in both modes, so the signals
        # and docids trees must share the same shard layout.
        shards_list = sorted(os.listdir(DOCIDS_DIR))
        all_results = list(tqdm(
            pool.imap(processing_function, shards_list),
            total=len(shards_list)
        ))
    all_shards_counts = {
        "doc_count": 0,
        "char_count": 0
    }
    for result in all_results:
        for k in all_shards_counts:
            all_shards_counts[k] += result[k]
    return all_shards_counts


if __name__ == "__main__":
    NUM_CORES = 50
    SNAPSHOT = "2023-14"
    LANGUAGE = "en"
    PARTITION = "head"
    DATA_ROOT_DIR = "/mnt/weka/peacock/enfm-dataprocessing/RedPajamaV2/data"
    SIGNALS_DIR = f"{DATA_ROOT_DIR}/quality_signals/{SNAPSHOT}"
    DOCIDS_DIR = f"{DATA_ROOT_DIR}/filtered_docids/quality_filtered_minus_clustered/{SNAPSHOT}"
    SIGNALS_EXTENSION = "signals.json.gz"
    DOCIDS_EXTENSION = "json"
    # Set True to count the complete (unfiltered) dataset. SIGNALS_DIR must
    # then point at the full unfiltered signals; pointing it at a subset would
    # yield that subset's counts instead.
    GET_COMPLETE_DATASET_COUNTS = False
    if GET_COMPLETE_DATASET_COUNTS:
        print(process_docids_store(process_shard_unfiltered))
    else:
        print(process_docids_store(process_shard_docids))
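
# The script prints a dict like {"doc_count": ..., "char_count": ...}; the
# notes below record these counts for several runs.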
"""
Notes
- Counts format: (doc_count, char_count)
- RedPajamaV2 Snapshot 2023-14
- unfiltered
- head: (133M, 678.9B)
- middle: (194.3M, 944.8B)
- total: (327.3M, 1623.7B)
- quality_filtered
- head: (76.5M, 437.4B)
- middle: (113.9M, 605.9B)
- quality_filtered_minus_clustered
- head: (67.6M, 382.8B)
- middle: (102.8M, 539.5B)
- total: (170.4M, 922.3B): 52% docs, 57% chars retained
"""