PluginTransformer / src/model/super_tokenizer.py
import torch
import torch.nn as nn
from transformers import LlamaConfig, LlamaModel


class SuperTokenizer(LlamaModel):
    """Llama encoder that returns the hidden states of designated "super" tokens."""

    def __init__(self, config: LlamaConfig):
        super().__init__(config)
        # Number of sequences encoded per sub-batch inside forward().
        self.batch_size = 1
        # Projection head over the hidden states (defined here, not applied in forward()).
        self.my_pooler = nn.Linear(config.hidden_size, 4096)
    def forward(self, input_ids, attention_mask, super_token_indices, **kwargs):
        hidden_state = []
        # Encode the batch in sub-batches of self.batch_size sequences to limit memory use.
        for i in range(0, input_ids.shape[0], self.batch_size):
            end_index = min(i + self.batch_size, input_ids.shape[0])
            output = super().forward(
                input_ids[i:end_index],
                attention_mask[i:end_index],
                **kwargs,
            )
            # For each sequence, keep only the hidden states at its super-token positions.
            for j in range(end_index - i):
                hidden_state.append(
                    output.last_hidden_state[j][super_token_indices[i + j]]
                )
        # Concatenate the selected token states into one (num_super_tokens, hidden_size) tensor.
        embedding = torch.cat(hidden_state, dim=0).contiguous()
        return embedding
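

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal, self-contained
# example of calling SuperTokenizer with randomly initialized weights. The
# tiny LlamaConfig values and the sample super_token_indices below are
# illustrative assumptions, chosen only so the snippet runs quickly without
# downloading a pretrained checkpoint.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tiny_config = LlamaConfig(
        vocab_size=128,
        hidden_size=32,
        intermediate_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=4,
        max_position_embeddings=64,
    )
    model = SuperTokenizer(tiny_config).eval()

    # Two sequences of length 8; every position is attended to.
    input_ids = torch.randint(0, tiny_config.vocab_size, (2, 8))
    attention_mask = torch.ones_like(input_ids)
    # One list of super-token positions per sequence.
    super_token_indices = [[0, 3], [7]]

    with torch.no_grad():
        embedding = model(input_ids, attention_mask, super_token_indices)
    # Three selected tokens in total, each with hidden_size features.
    print(embedding.shape)  # torch.Size([3, 32])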