# evo_model.py
import torch
import torch.nn as nn


class EvoEncoder(nn.Module):
    """Transformer encoder with an optional learned memory token prepended to the input sequence."""

    def __init__(self, d_model=512, num_heads=8, ffn_dim=1024, num_layers=6, memory_enabled=True):
        super().__init__()
        # Vocabulary size matches the BERT WordPiece vocabulary (30522 tokens)
        self.embedding = nn.Embedding(30522, d_model)
        self.memory_enabled = memory_enabled
        if memory_enabled:
            # memory_proj is registered here but is not applied in forward()
            self.memory_proj = nn.Linear(d_model, d_model)
            self.memory_token = nn.Parameter(torch.zeros(1, 1, d_model))
        else:
            self.memory_token = None
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=num_heads,
            dim_feedforward=ffn_dim,
            batch_first=True,
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

    def forward(self, input_ids):
        x = self.embedding(input_ids)  # (batch, seq_len, d_model)
        if self.memory_enabled and self.memory_token is not None:
            # Prepend the shared memory token to every sequence in the batch
            mem = self.memory_token.expand(x.size(0), 1, x.size(2))
            x = torch.cat([mem, x], dim=1)
        x = self.transformer(x)
        return x


class EvoTransformerV22(nn.Module):
    def __init__(self, config=None):
        super().__init__()
        # Default architecture if no config is passed
        if config is None:
            config = {
                "num_layers": 6,
                "ffn_dim": 1024,
                "num_heads": 8,
                "memory_enabled": True,
            }
        self.encoder = EvoEncoder(
            d_model=512,
            num_heads=config["num_heads"],
            ffn_dim=config["ffn_dim"],
            num_layers=config["num_layers"],
            memory_enabled=config["memory_enabled"],
        )
        self.pool = nn.AdaptiveAvgPool1d(1)
        self.classifier = nn.Linear(512, 1)

    def forward(self, input_ids):
        x = self.encoder(input_ids)                   # (batch, seq_len[+1], 512)
        x = self.pool(x.transpose(1, 2)).squeeze(-1)  # mean-pool over tokens -> (batch, 512)
        return self.classifier(x)                     # single logit per example


# ✅ For loading models dynamically during mutation or feedback retrain
def build_model_from_config(config):
    return EvoTransformerV22(config)
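

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): builds the
# default model via build_model_from_config and runs a forward pass on a
# dummy batch of token IDs to confirm the output shape. The batch size (4),
# sequence length (16), and random IDs are assumptions for the demo; IDs
# must stay below the embedding's vocabulary size of 30522.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = build_model_from_config(None)         # None falls back to the default config
    dummy_ids = torch.randint(0, 30522, (4, 16))  # (batch, seq_len) of token IDs
    logits = model(dummy_ids)
    print(logits.shape)  # expected: torch.Size([4, 1])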