#!/usr/bin/env python3
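"""Create a small sample of the JapaneseCode1Retrieval dataset.

Selects the first five queries, the corpus documents they are judged
against, and the matching qrels; applies light text perturbations to the
queries and code documents; and writes the result in the same parquet
layout as the original dataset.
"""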

import random
import re
from pathlib import Path

import pandas as pd

def create_sample_dataset():
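    """Load the original dataset, select the first five queries with their
    relevant corpus documents and qrels, and return tweaked copies of the
    corpus and queries along with the untouched qrels."""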
    # Load original data
    base_path = Path("/Users/fodizoltan/Projects/toptal/voyageai/tmp/rteb/data/JapaneseCode1Retrieval")
    
    corpus_df = pd.read_parquet(base_path / "corpus" / "corpus-00000-of-00001.parquet")
    queries_df = pd.read_parquet(base_path / "queries" / "queries-00000-of-00001.parquet")
    qrels_df = pd.read_parquet(base_path / "data" / "test-00000-of-00001.parquet")
    
    print(f"Loaded {len(corpus_df)} corpus docs, {len(queries_df)} queries, {len(qrels_df)} qrels")
    
    # Get first 5 queries
    first_5_queries = queries_df.head(5).copy()
    print("First 5 queries:")
    for _, row in first_5_queries.iterrows():
        print(f"  {row['_id']}: {row['text']}")
    
    # Get related corpus documents from qrels
    first_5_query_ids = first_5_queries['_id'].tolist()
    related_qrels = qrels_df[qrels_df['query-id'].isin(first_5_query_ids)].copy()
    related_corpus_ids = related_qrels['corpus-id'].unique()
    related_corpus = corpus_df[corpus_df['_id'].isin(related_corpus_ids)].copy()
    
    print(f"Found {len(related_corpus)} related corpus documents")
    print(f"Found {len(related_qrels)} qrels")
    
    # Tweak the data
    tweaked_queries = tweak_queries(first_5_queries)
    tweaked_corpus = tweak_corpus(related_corpus)
    
    return tweaked_corpus, tweaked_queries, related_qrels

def tweak_queries(queries_df):
    """Tweak Japanese queries by changing a few words/characters"""
    tweaked = queries_df.copy()
    
    # Simple tweaks for Japanese text
    tweaks = {
        'リスト': 'リスト配列',
        '要素': 'エレメント', 
        '整数': '数値',
        '文字列': 'ストリング',
        '変数': 'パラメータ',
        'する': 'を行う',
        'の': 'が持つ'
    }
    
    for idx, row in tweaked.iterrows():
        text = row['text']
        # Apply random tweaks
        for old, new in tweaks.items():
            if old in text and random.random() < 0.3:  # 30% chance to apply each tweak
                text = text.replace(old, new, 1)  # Replace only first occurrence
        tweaked.at[idx, 'text'] = text
    
    return tweaked

def tweak_corpus(corpus_df):
    """Tweak Python code by changing variable names and some function calls"""
    tweaked = corpus_df.copy()
    
    # Simple code tweaks
    code_tweaks = {
        'x': 'data',
        'i': 'idx', 
        'd': 'digit',
        'enumerate': 'enumerate_items',
        'sum': 'total',
        'len': 'length',
        'str': 'string',
        'int': 'integer',
        'list': 'array'
    }
    
    for idx, row in tweaked.iterrows():
        text = row['text']
        # Apply random tweaks
        for old, new in code_tweaks.items():
            if old in text and random.random() < 0.4:  # 40% chance to apply each tweak
                # Only replace whole words to avoid breaking code
                pattern = r'\b' + re.escape(old) + r'\b'
                if re.search(pattern, text):
                    text = re.sub(pattern, new, text, count=1)
        tweaked.at[idx, 'text'] = text
    
    return tweaked

def save_sample_dataset(corpus_df, queries_df, qrels_df):
    """Save sample dataset in MTEB format"""
    sample_path = Path("/Users/fodizoltan/Projects/toptal/voyageai/tmp/rteb/data/JapaneseCode1Retrieval-sample")
    sample_path.mkdir(parents=True, exist_ok=True)
    
    # Create subdirectories
    (sample_path / "corpus").mkdir(exist_ok=True)
    (sample_path / "queries").mkdir(exist_ok=True)
    (sample_path / "data").mkdir(exist_ok=True)
    
    # Save parquet files
    corpus_df.to_parquet(sample_path / "corpus" / "corpus-00000-of-00001.parquet", index=False)
    queries_df.to_parquet(sample_path / "queries" / "queries-00000-of-00001.parquet", index=False)
    qrels_df.to_parquet(sample_path / "data" / "test-00000-of-00001.parquet", index=False)
    
    print(f"Sample dataset saved to {sample_path}")
    print(f"  - Corpus: {len(corpus_df)} documents")
    print(f"  - Queries: {len(queries_df)} queries")
    print(f"  - Qrels: {len(qrels_df)} relevance judgments")

if __name__ == "__main__":
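    # Build the tweaked sample and write it out in the MTEB parquet layout.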
    corpus, queries, qrels = create_sample_dataset()
    save_sample_dataset(corpus, queries, qrels)