from datasets import load_dataset
import pandas as pd

# Load the Python subset of the CodeSearchNet dataset.
# Note: recent versions of `datasets` may require trust_remote_code=True here,
# since code_search_net is distributed with a loading script.
dataset = load_dataset('code_search_net', 'python')

# Inspect the first training sample to confirm the schema.
print(dataset['train'][0])
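# Optional sanity check: list the available columns. The field used below,
# 'func_code_string', holds the raw source of each function.
print(dataset['train'].column_names)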

def split_code_into_chunks(code, chunk_length):
    """Tokenize code on whitespace and split it into chunks of at most chunk_length words."""
    words = code.split()
    chunks = []
    # Step through the token list in strides of chunk_length.
    for i in range(0, len(words), chunk_length):
        chunk = words[i:i + chunk_length]
        chunks.append(' '.join(chunk))
    return chunks
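# Minimal sketch of the expected chunking behavior on a toy input
# (illustrative only): eight whitespace tokens with chunk_length=4
# yield two 4-token chunks.
assert split_code_into_chunks("def f ( x ) : return x", 4) == \
    ['def f ( x', ') : return x']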

def split_code_samples(dataset, chunk_length=32):
    """Split every sample in the dataset into chunks of chunk_length words."""
    chunks = []
    for item in dataset:
        # 'func_code_string' is the raw source code of the function.
        code = item['func_code_string']
        code_chunks = split_code_into_chunks(code, chunk_length)
        chunks.extend(code_chunks)
    return chunks
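# A generator variant (a sketch, not used below): streams chunks one at a
# time instead of materializing the whole list, which can help if the train
# split does not fit comfortably in memory.
def iter_code_chunks(dataset, chunk_length=32):
    for item in dataset:
        yield from split_code_into_chunks(item['func_code_string'], chunk_length)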

# Build chunked corpora at four granularities.
code_chunks_32 = split_code_samples(dataset['train'], chunk_length=32)
code_chunks_64 = split_code_samples(dataset['train'], chunk_length=64)
code_chunks_128 = split_code_samples(dataset['train'], chunk_length=128)
code_chunks_256 = split_code_samples(dataset['train'], chunk_length=256)
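# Quick size check: smaller chunk lengths should yield more (shorter) chunks.
for length, chunks in [(32, code_chunks_32), (64, code_chunks_64),
                       (128, code_chunks_128), (256, code_chunks_256)]:
    print(f"chunk_length={length}: {len(chunks)} chunks")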

def save_to_parquet(data, filename):
    """Write a list of code chunks to a single-column Parquet file."""
    df = pd.DataFrame(data, columns=["code"])
    # engine='pyarrow' requires the pyarrow package to be installed.
    df.to_parquet(filename, engine='pyarrow', compression='snappy')
    print(f"Saved {filename} as Parquet file.")

save_to_parquet(code_chunks_32, "code_chunks_32.parquet")
save_to_parquet(code_chunks_64, "code_chunks_64.parquet")
save_to_parquet(code_chunks_128, "code_chunks_128.parquet")
save_to_parquet(code_chunks_256, "code_chunks_256.parquet")
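# Read one file back to verify the round trip (assumes the files above were
# written to the current working directory).
df_check = pd.read_parquet("code_chunks_32.parquet")
print(df_check.shape)
print(df_check.head())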