"""
Utility to check result of minhash-clustering
"""
import gzip
import os

import pandas as pd
from tqdm import tqdm

def get_file_subpath(language, partition, extension):
    return f"{language}_{partition}.{extension}"
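# e.g. get_file_subpath("en", "head", "clusters.parquet")
#      -> "en_head.clusters.parquet"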

if __name__ == "__main__":
    CLUSTERS_DIR = "/home1/BharatGPT_Data/RedPajamaV2/data/minhash_clusters/2023-14"
    DOCS_DIR = "/home1/BharatGPT_Data/RedPajamaV2/data/documents/2023-14"
    CLUSTERS_SHARD = "0187"
    LANGUAGE = "en"
    PARTITION = "head"
    CLUSTERS_EXTENSION = "clusters.parquet"
    DOCS_EXTENSION = "json.gz"

    # Pick an arbitrary cluster id: the first row of one shard's cluster file.
    df = pd.read_parquet(os.path.join(
        CLUSTERS_DIR,
        CLUSTERS_SHARD,
        get_file_subpath(LANGUAGE, PARTITION, CLUSTERS_EXTENSION)
    ), columns=["cluster_id"])
    required_cluster_id = df["cluster_id"].iloc[0]
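
    # Scan the cluster files of *all* shards: documents that fall into the same
    # MinHash cluster can live in different shards, so one shard is not enough.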
    for shard in tqdm(os.listdir(CLUSTERS_DIR)):
        df2 = pd.read_parquet(os.path.join(
            CLUSTERS_DIR,
            shard,
            get_file_subpath(LANGUAGE, PARTITION, CLUSTERS_EXTENSION)
        ), columns=["id", "cluster_id"])
        # keep only the rows assigned to the chosen cluster
        df2 = df2[df2["cluster_id"] == required_cluster_id]
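        # The trailing path component of "id" is assumed to be the 0-based row
        # index of the document within this shard's json.gz file; sorting lets
        # the single pass over the file below visit matches in order.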
        doc_ids = sorted(int(doc_id.split("/")[-1]) for doc_id in df2["id"])
        if len(doc_ids) == 0:
            continue
        # Stream this shard's documents once, printing every line whose index
        # appears in doc_ids ("rt" so lines come back as str, not bytes).
        with gzip.open(os.path.join(
            DOCS_DIR,
            shard,
            get_file_subpath(LANGUAGE, PARTITION, DOCS_EXTENSION)
        ), "rt") as docs_file:
            next_idx = 0
            next_doc_id = doc_ids[next_idx]
            for idx, line in enumerate(docs_file):
                if idx == next_doc_id:
                    print(line)
                    print()
                    next_idx += 1
                    if next_idx < len(doc_ids):
                        next_doc_id = doc_ids[next_idx]
                    else:
                        # all matches in this shard printed; stop reading early
                        break
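
# A minimal sketch of how a matched line could be decoded further, assuming each
# line of the json.gz files is a standalone JSON object (field names depend on
# the RedPajama-V2 document schema and are not assumed here):
#
#   import orjson
#   doc = orjson.loads(line)
#   print(sorted(doc.keys()))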