| import pickle |
| import numpy as np |
| import pandas as pd |
| from tokenizers import Tokenizer |
| import os |
|
|
# Input: pickled dict expected to hold "test_sequences" (DNA strings) and
# "test_phyloP" (per-base conservation score arrays), aligned 1:1.
DATA_PATH = "/home/n5huang/dna_token/tokenizer_evaluation/eval_data.pkl"
# Output: pickled {tokenizer_name: {"mean", "var", "token"}} written at the end of the run.
RESULTS_PATH = "/home/n5huang/dna_token/tokenizer_evaluation/evaluation_results.pkl"


# Tokenizer name -> path to its serialized HuggingFace `tokenizers` JSON file.
# Files that are missing on disk are skipped (with a warning) at load time.
VOCAB_PATHS = {
    "Merged_uni": "/home/n5huang/dna_token/tokenizer_evaluation/merge_bpe/merge_tokenizer_unigram.json",
    "Merged_word": "/home/n5huang/dna_token/tokenizer_evaluation/merge_bpe/merge_tokenizer_wordPiece.json",
    "Weighted": "/home/n5huang/dna_token/tokenizer_evaluation/weighted_bpe/tokenizer.json",
    "SeqOnly": "/home/n5huang/dna_token/tokenizer_evaluation/baseline_bpe/tokenizer.json",
    "DNAbert2": "/home/n5huang/dna_token/pretrain/models/DNAbert2_Pretrained/tokenizer.json",
    "Grover": "/home/n5huang/dna_token/pretrain/models/Grover_Pretrained/tokenizer.json",
}
|
|
def evaluate_tokenizer_on_phyloP(tokenizer, sequences, phyloPs, min_len=100):
    """
    Score a tokenizer's segmentation against per-base phyloP conservation.

    For every token occurrence in every (sufficiently long) sequence, compute
    the mean and variance of the phyloP values covered by that token's
    character span, as reported by the encoding's offsets.

    Args:
        tokenizer: object with an ``encode(str)`` method whose result exposes
            ``ids``, ``tokens`` and ``offsets`` attributes (e.g. a
            ``tokenizers.Tokenizer``).
        sequences: iterable of DNA sequence strings.
        phyloPs: iterable of per-base score arrays, aligned 1:1 with
            ``sequences`` — assumed same length as each sequence; TODO confirm
            against the data-producing pipeline.
        min_len: sequences shorter than this many bases are skipped
            (default 100, matching the previous hard-coded threshold).

    Returns:
        dict with keys:
            "mean":  np.ndarray of per-token-occurrence mean phyloP
            "var":   np.ndarray of per-token-occurrence phyloP variance
            "token": list of token strings, parallel to the two arrays
    """
    token_means = []
    token_vars = []
    token_names = []
    total_tokens = 0

    for seq, scores in zip(sequences, phyloPs):
        # Skip windows too short to yield a meaningful segmentation.
        if len(seq) < min_len:
            continue

        # Accept plain lists as well as numpy arrays; slicing + .mean()/.var()
        # below require ndarray semantics.
        scores = np.asarray(scores)

        enc = tokenizer.encode(seq.upper())
        total_tokens += len(enc.ids)

        for tok, (start, end) in zip(enc.tokens, enc.offsets):
            region = scores[start:end]

            # Special tokens (e.g. [CLS]/[SEP]) carry empty (0, 0) offsets;
            # there is nothing to score for them.
            if len(region) == 0:
                continue

            token_means.append(region.mean())
            token_vars.append(region.var())
            token_names.append(tok)

    print(f" -> Processed {total_tokens:,} tokens.")
    return {
        "mean": np.array(token_means),
        "var": np.array(token_vars),
        "token": token_names
    }
|
|
|
|
| |
| if __name__ == "__main__": |
| print("Loading data from pickle...") |
| with open(DATA_PATH, "rb") as f: |
| data = pickle.load(f) |
| |
| sequences = data["test_sequences"] |
| phyloPs = data["test_phyloP"] |
| print(f"Loaded {len(sequences)} genomic windows.") |
|
|
| |
| tokenizers = {} |
| print("Loading tokenizers...") |
| for name, path in VOCAB_PATHS.items(): |
| if os.path.exists(path): |
| tokenizers[name] = Tokenizer.from_file(path) |
| print(f"✅ Loaded {name}") |
| else: |
| print(f"❌ Warning: File {path} not found. Skipping.") |
|
|
| |
| results = {} |
| print("\nStarting Benchmark (with Token Names)...") |
| |
| for name, tok in tokenizers.items(): |
| print(f"Evaluating {name}...") |
| results[name] = evaluate_tokenizer_on_phyloP(tok, sequences, phyloPs) |
|
|
| |
| print(f"\nSaving results to {RESULTS_PATH}...") |
| with open(RESULTS_PATH, "wb") as f: |
| pickle.dump(results, f) |
| |
| print("Success! Download 'evaluation_results.pkl' (Note: File size will be larger).") |
|
|