import torch
import torch.nn as nn


class LSTMClassifier(nn.Module):
    def __init__(self, vocab_size, embed_dim, hidden_dim, output_dim, padding_idx):
        super().__init__()
        # Embedding layer; the padding index maps to a zero vector that receives no gradient.
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
        # Bidirectional LSTM. Note: nn.LSTM's `dropout` argument only applies between
        # stacked layers, so with num_layers=1 it is a no-op (PyTorch emits a warning).
        # Dropout is therefore applied explicitly after the LSTM instead.
        self.lstm = nn.LSTM(embed_dim, hidden_dim, num_layers=1,
                            batch_first=True, bidirectional=True)
        self.dropout = nn.Dropout(0.3)
        # The final hidden states of both directions are concatenated, hence hidden_dim * 2.
        self.fc1 = nn.Linear(hidden_dim * 2, hidden_dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # x: (batch, seq_len) token indices -> embedded: (batch, seq_len, embed_dim)
        embedded = self.embedding(x)
        # hidden: (num_layers * num_directions, batch, hidden_dim)
        output, (hidden, _) = self.lstm(embedded)
        # Concatenate the final forward (hidden[-2]) and backward (hidden[-1])
        # hidden states -> (batch, hidden_dim * 2)
        hidden_cat = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        x = self.dropout(hidden_cat)
        x = self.relu(self.fc1(x))
        return self.fc2(x)
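
# Minimal smoke test, for illustration only: every hyperparameter below
# (vocab size, dimensions, number of classes, batch shape) is a hypothetical
# placeholder, not a value from the original source.
if __name__ == "__main__":
    model = LSTMClassifier(vocab_size=10_000, embed_dim=128, hidden_dim=256,
                           output_dim=4, padding_idx=0)
    batch = torch.randint(1, 10_000, (8, 50))  # 8 sequences of 50 token ids
    logits = model(batch)
    print(logits.shape)  # torch.Size([8, 4]); raw logits, pair with nn.CrossEntropyLoss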