Datasets:
Tasks: Text Retrieval
Modalities: Text
Formats: parquet
Sub-tasks: document-retrieval
Languages: Japanese
Size: < 1K
License:
#!/usr/bin/env python3
import random
import re
from pathlib import Path

import pandas as pd
def create_sample_dataset(base_path=None):
    """Build a small sample dataset from the first 5 queries of the corpus.

    Loads the JapaneseCode1Retrieval corpus/queries/qrels parquet files,
    keeps the first 5 queries plus every corpus document they are linked to
    in the qrels, and returns lightly-perturbed ("tweaked") copies.

    Parameters
    ----------
    base_path : str | Path | None
        Root directory of the original dataset. Defaults to the original
        hard-coded location for backward compatibility.

    Returns
    -------
    tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]
        (tweaked_corpus, tweaked_queries, related_qrels)
    """
    if base_path is None:
        # Historical default; pass base_path to use this on another machine.
        base_path = Path("/Users/fodizoltan/Projects/toptal/voyageai/tmp/rteb/data/JapaneseCode1Retrieval")
    else:
        base_path = Path(base_path)

    # Load original data (MTEB-style layout: corpus/, queries/, data/).
    corpus_df = pd.read_parquet(base_path / "corpus" / "corpus-00000-of-00001.parquet")
    queries_df = pd.read_parquet(base_path / "queries" / "queries-00000-of-00001.parquet")
    qrels_df = pd.read_parquet(base_path / "data" / "test-00000-of-00001.parquet")
    print(f"Loaded {len(corpus_df)} corpus docs, {len(queries_df)} queries, {len(qrels_df)} qrels")

    # Keep only the first 5 queries.
    first_5_queries = queries_df.head(5).copy()
    print("First 5 queries:")
    for _, row in first_5_queries.iterrows():
        print(f" {row['_id']}: {row['text']}")

    # Restrict qrels to those queries, then pull the corpus docs they cite.
    first_5_query_ids = first_5_queries['_id'].tolist()
    related_qrels = qrels_df[qrels_df['query-id'].isin(first_5_query_ids)].copy()
    related_corpus_ids = related_qrels['corpus-id'].unique()
    related_corpus = corpus_df[corpus_df['_id'].isin(related_corpus_ids)].copy()
    print(f"Found {len(related_corpus)} related corpus documents")
    print(f"Found {len(related_qrels)} qrels")

    # Perturb the text so the sample is not byte-identical to the original.
    tweaked_queries = tweak_queries(first_5_queries)
    tweaked_corpus = tweak_corpus(related_corpus)
    return tweaked_corpus, tweaked_queries, related_qrels
def tweak_queries(queries_df): | |
"""Tweak Japanese queries by changing a few words/characters""" | |
tweaked = queries_df.copy() | |
# Simple tweaks for Japanese text | |
tweaks = { | |
'リスト': 'リスト配列', | |
'要素': 'エレメント', | |
'整数': '数値', | |
'文字列': 'ストリング', | |
'変数': 'パラメータ', | |
'する': 'を行う', | |
'の': 'が持つ' | |
} | |
for idx, row in tweaked.iterrows(): | |
text = row['text'] | |
# Apply random tweaks | |
for old, new in tweaks.items(): | |
if old in text and random.random() < 0.3: # 30% chance to apply each tweak | |
text = text.replace(old, new, 1) # Replace only first occurrence | |
tweaked.at[idx, 'text'] = text | |
return tweaked | |
def tweak_corpus(corpus_df):
    """Return a copy of *corpus_df* with Python code texts lightly perturbed.

    Each identifier in the substitution table has a 40% chance per document
    of being renamed. Only whole-word matches are replaced (to avoid
    corrupting code), and only the first occurrence.
    """
    tweaked = corpus_df.copy()
    # Substitution table: original identifier -> replacement name.
    code_tweaks = {
        'x': 'data',
        'i': 'idx',
        'd': 'digit',
        'enumerate': 'enumerate_items',
        'sum': 'total',
        'len': 'length',
        'str': 'string',
        'int': 'integer',
        'list': 'array'
    }
    # Precompile word-boundary patterns once. The original re-imported `re`
    # and rebuilt each pattern inside the row loop on every iteration.
    patterns = {old: re.compile(r'\b' + re.escape(old) + r'\b') for old in code_tweaks}

    for idx, row in tweaked.iterrows():
        text = row['text']
        for old, new in code_tweaks.items():
            # 40% chance per tweak. The substring pre-check mirrors the
            # original behavior: randomness is consumed even when the only
            # matches are substrings of longer identifiers.
            if old in text and random.random() < 0.4:
                if patterns[old].search(text):
                    # Whole-word replace, first occurrence only.
                    text = patterns[old].sub(new, text, count=1)
        tweaked.at[idx, 'text'] = text
    return tweaked
def save_sample_dataset(corpus_df, queries_df, qrels_df, sample_path=None):
    """Save the sample dataset to disk in the MTEB parquet layout.

    Writes corpus/, queries/ and data/ subdirectories under *sample_path*,
    each holding a single 00000-of-00001 parquet shard.

    Parameters
    ----------
    corpus_df, queries_df, qrels_df : pd.DataFrame
        The frames produced by create_sample_dataset().
    sample_path : str | Path | None
        Output root. Defaults to the original hard-coded location for
        backward compatibility.
    """
    if sample_path is None:
        sample_path = Path("/Users/fodizoltan/Projects/toptal/voyageai/tmp/rteb/data/JapaneseCode1Retrieval-sample")
    else:
        sample_path = Path(sample_path)

    # Create the layout; parents=True so a missing ancestor directory does
    # not make mkdir raise (the original would fail in that case).
    for subdir in ("corpus", "queries", "data"):
        (sample_path / subdir).mkdir(parents=True, exist_ok=True)

    # One parquet shard per split, matching the source dataset's file names.
    corpus_df.to_parquet(sample_path / "corpus" / "corpus-00000-of-00001.parquet", index=False)
    queries_df.to_parquet(sample_path / "queries" / "queries-00000-of-00001.parquet", index=False)
    qrels_df.to_parquet(sample_path / "data" / "test-00000-of-00001.parquet", index=False)

    print(f"Sample dataset saved to {sample_path}")
    print(f" - Corpus: {len(corpus_df)} documents")
    print(f" - Queries: {len(queries_df)} queries")
    print(f" - Qrels: {len(qrels_df)} relevance judgments")
if __name__ == "__main__":
    # Build the tweaked sample and persist it in the MTEB layout.
    sample_corpus, sample_queries, sample_qrels = create_sample_dataset()
    save_sample_dataset(sample_corpus, sample_queries, sample_qrels)