# -*- coding: utf-8 -*-
"""
Martinez-Gil, J. (2025). Augmenting the Interpretability of GraphCodeBERT for Code Similarity Tasks.
International Journal of Software Engineering and Knowledge Engineering, 35(05), 657–678.
"""

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from transformers import RobertaTokenizer, RobertaModel
import torch
import gradio as gr
from io import BytesIO
from PIL import Image

# Load GraphCodeBERT from Hugging Face (with cache)
tokenizer = RobertaTokenizer.from_pretrained("microsoft/graphcodebert-base", cache_dir="models/")
model = RobertaModel.from_pretrained("microsoft/graphcodebert-base", cache_dir="models/")
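
# Note: from_pretrained returns the model already in eval mode; moving the model
# and inputs to a GPU (model.to("cuda")) is an optional speed-up, omitted here.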

# Default sorting algorithm code snippets
default_code_1 = """def bubble_sort(arr):
    n = len(arr)
    for i in range(n):
        for j in range(0, n-i-1):
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
    return arr"""

default_code_2 = """def quick_sort(arr, low, high):
    if low < high:
        pi = partition(arr, low, high)
        quick_sort(arr, low, pi - 1)
        quick_sort(arr, pi + 1, high)

def partition(arr, low, high):
    i = (low - 1)
    pivot = arr[high]
    for j in range(low, high):
        if arr[j] <= pivot:
            i += 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i+1], arr[high] = arr[high], arr[i+1]
    return (i + 1)"""

# Get token embeddings for a code snippet
def get_token_embeddings(code):
    # RoBERTa-style tokenization caps input at 512 tokens; longer snippets are truncated
    inputs = tokenizer(code, return_tensors="pt", max_length=512, truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    # One contextual 768-d vector per token, taken from the final encoder layer
    token_embeddings = outputs.last_hidden_state.squeeze(0).cpu().numpy()
    tokens = tokenizer.convert_ids_to_tokens(inputs['input_ids'].squeeze(0))
    return token_embeddings, tokens
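
# Illustrative helper (not part of the published method): mean-pool each snippet's
# token embeddings into one vector and score their closeness with cosine similarity.
# The paper's actual similarity metric may differ; treat this as a minimal sketch.
def embedding_similarity(code1, code2):
    emb1, _ = get_token_embeddings(code1)
    emb2, _ = get_token_embeddings(code2)
    v1, v2 = emb1.mean(axis=0), emb2.mean(axis=0)  # collapse tokens to one vector each
    return float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))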

# Plot comparison between two algorithms
def compare_algorithms(code1, code2):
    emb1, tokens1 = get_token_embeddings(code1)
    emb2, tokens2 = get_token_embeddings(code2)

    # Fit PCA on both snippets jointly so their tokens share a single 2-D projection
    combined = np.concatenate([emb1, emb2], axis=0)
    pca = PCA(n_components=2)
    coords = pca.fit_transform(combined)

    plt.figure(figsize=(6, 5), dpi=150)
    plt.scatter(coords[:len(tokens1), 0], coords[:len(tokens1), 1], color='red', label="Code 1", s=20)
    plt.scatter(coords[len(tokens1):, 0], coords[len(tokens1):, 1], color='blue', label="Code 2", s=20)
    plt.legend()
    plt.xticks([]); plt.yticks([]); plt.grid(False)

    buf = BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight')
    plt.close()
    buf.seek(0)
    return Image.open(buf)
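
# Quick headless check (illustrative): render the comparison without launching the UI
#   compare_algorithms(default_code_1, default_code_2).save("comparison.png")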

interface = gr.Interface(
    fn=compare_algorithms,
    inputs=[
        gr.Code(language="python", value=default_code_1, label="Code 1"),
        gr.Code(language="python", value=default_code_2, label="Code 2")
    ],
    outputs=gr.Image(type="pil", label="Token Embedding PCA"),
    title="GraphCodeBERT Token Embedding Comparison",
    description="Edit or paste two Python code snippets. This tool compares their token-level embeddings using GraphCodeBERT and PCA.",
    article="""
**Citation**  
Martinez-Gil, J. (2025). *Augmenting the Interpretability of GraphCodeBERT for Code Similarity Tasks.* International Journal of Software Engineering and Knowledge Engineering, 35(05), 657–678.

**GitHub Repository**  
[View Source on GitHub](https://github.com/jorge-martinez-gil/graphcodebert-interpretability)
"""
)

if __name__ == "__main__":
    interface.launch()