|
This is a 0-layer transformer, as described in [A Mathematical Framework for Transformer Circuits](https://transformer-circuits.pub/2021/framework/index.html). With no attention or MLP blocks, the logits at each position depend only on the current token, so the best the model can learn is an approximation of the bigram statistics of its training data.
|
Load it with:
|
```python
import torch.nn as nn
from transformers import LlamaConfig, PreTrainedModel


class ZeroLayerTransformer(PreTrainedModel):
    config_class = LlamaConfig

    def __init__(self, config: LlamaConfig):
        super().__init__(config)
        # The whole model: a token embedding followed directly by the unembedding.
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

    def forward(self, input_ids=None, attention_mask=None, labels=None, **kwargs):
        # No attention or MLP: each position's logits depend only on its own token.
        hidden_states = self.embed_tokens(input_ids)
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Standard causal-LM loss: predict token t+1 from token t.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
            )

        return {"loss": loss, "logits": logits}


model = ZeroLayerTransformer.from_pretrained(
    "Butanium/simple-stories-zero-layer-simple-transformer"
)
```
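Once loaded, the model can be queried like any causal language model. Here is a minimal sketch of next-token prediction; it assumes the tokenizer is published under the same repo id, which may not hold:

```python
import torch
from transformers import AutoTokenizer

# Assumption: the tokenizer lives in the same repository as the weights.
tokenizer = AutoTokenizer.from_pretrained(
    "Butanium/simple-stories-zero-layer-simple-transformer"
)

inputs = tokenizer("Once upon a", return_tensors="pt")
with torch.no_grad():
    logits = model(input_ids=inputs["input_ids"])["logits"]

# The logits at the last position predict the next token.
next_token_id = logits[0, -1].argmax().item()
print(tokenizer.decode(next_token_id))
```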
|
The model was trained on the SimpleStories dataset.
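
Because there are no attention or MLP layers, the model's entire behavior is captured by the single (vocab × vocab) map W_U W_E from the current token to next-token logits, which approximates the bigram statistics of SimpleStories (see the linked paper). A sketch for inspecting it, continuing from the snippets above:

```python
import torch

with torch.no_grad():
    # Row t holds the logits over the next token given current token t.
    bigram_logits = model.embed_tokens.weight @ model.lm_head.weight.T

# Top-5 continuations for an illustrative token.
token_id = tokenizer("the", add_special_tokens=False)["input_ids"][0]
top_ids = bigram_logits[token_id].topk(5).indices
print([tokenizer.decode(i) for i in top_ids])
```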