Adversarially fine-tuned TinyStories-33M models + various SAEs

See https://github.com/HannesThurnherr/advint/commit/e819b53e54c57ba70b28ef2c18e82418e3980509
```python
import torch
from transformer_lens import HookedTransformer

from SAE import TopKSparseAutoencoder  # see https://github.com/HannesThurnherr/advint/blob/main/SAE.py

# Load the base TinyStories-33M model, then swap in the adversarially
# fine-tuned weights.
model = HookedTransformer.from_pretrained("roneneldan/TinyStories-33M")
model.load_state_dict(torch.load("lm_adv.pth"))

# SAE dimensions: the latent space is 10x the residual-stream width.
resid_dim = model.cfg.d_model
sae_latent_dim = 10 * resid_dim

# Load one of the trained SAE checkpoints (uncomment the one you want).
sae = TopKSparseAutoencoder(input_dim=resid_dim, latent_dim=sae_latent_dim, k=25)
sae.load_state_dict(torch.load("models/sae_base.pth"))
# sae.load_state_dict(torch.load("models/sae_base_e2e.pth"))
# sae.load_state_dict(torch.load("models/sae_adv.pth"))
# sae.load_state_dict(torch.load("models/sae_post_adv.pth"))
# sae.load_state_dict(torch.load("models/sae_post_adv_e2e.pth"))
```
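
After loading, a quick sanity check is to run some text through the model, grab residual-stream activations with transformer_lens, and feed them through the SAE. The sketch below is illustrative only: the hook point (`blocks.2.hook_resid_post`) and the assumption that the SAE's forward pass returns a `(reconstruction, latents)` pair are guesses, not taken from the repo.

```python
# Minimal usage sketch (assumptions noted in comments), continuing from the
# snippet above with `model` and `sae` already loaded.
import torch

tokens = model.to_tokens("Once upon a time, there was a little dog.")
_, cache = model.run_with_cache(tokens)

# Residual-stream activations at an assumed hook point: [batch, seq, d_model]
resid = cache["blocks.2.hook_resid_post"]

with torch.no_grad():
    recon, latents = sae(resid)  # assumed to return (reconstruction, latents)

print("mean squared reconstruction error:", (recon - resid).pow(2).mean().item())
print("active latents per token:", (latents != 0).float().sum(-1).mean().item())
```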