import torch
import torch.nn as nn


class Encoder(nn.Module):
    """Bidirectional GRU encoder over pretrained word embeddings."""

    def __init__(self, emb_matrix, hidden_size=64):
        super(Encoder, self).__init__()
        self.embedding, num_embeddings, embedding_dim = self.create_emb_layer(emb_matrix)
        self.hidden_size = hidden_size
        self.gru = nn.GRU(embedding_dim, hidden_size, num_layers=1,
                          bidirectional=True, batch_first=True)

    def create_emb_layer(self, weights_matrix, non_trainable=False):
        # Build an nn.Embedding initialized from the pretrained weight matrix.
        num_embeddings, embedding_dim = weights_matrix.shape
        emb_layer = nn.Embedding(num_embeddings, embedding_dim)
        emb_layer.load_state_dict({'weight': torch.as_tensor(weights_matrix)})
        if non_trainable:
            emb_layer.weight.requires_grad = False
        return emb_layer, num_embeddings, embedding_dim

    def forward(self, X, X_lengths):
        # X: (batch, max_seq_len) padded index tensor (the app vector);
        # X_lengths: true sequence lengths (kept on CPU for packing).
        embedded = self.embedding(X)
        # Pack so the GRU skips padded positions.
        embedded = torch.nn.utils.rnn.pack_padded_sequence(
            embedded, X_lengths, batch_first=True, enforce_sorted=False)
        output, hn = self.gru(embedded)
        # output could be unpacked with pad_packed_sequence(output, batch_first=True)
        # if per-step states were needed; only the final hidden states are used here.
        # hn: (2, batch, hidden_size) -> concatenate the forward and backward
        # final states into a single (batch, 2 * hidden_size) vector.
        return torch.cat([*hn], dim=-1)
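
# A minimal usage sketch, assuming emb_matrix is a NumPy array of pretrained
# vectors; the vocabulary size, dimensions, and the random padded batch below
# are made up for illustration, not taken from the original.
import numpy as np

# Hypothetical pretrained embeddings: vocabulary of 100 tokens, 50-dim vectors.
emb_matrix = np.random.randn(100, 50).astype('float32')
encoder = Encoder(emb_matrix, hidden_size=64)

# A padded batch of 3 sequences (index 0 used as padding).
X = torch.zeros(3, 7, dtype=torch.long)
X_lengths = torch.tensor([7, 5, 2])
X[0, :7] = torch.randint(1, 100, (7,))
X[1, :5] = torch.randint(1, 100, (5,))
X[2, :2] = torch.randint(1, 100, (2,))

out = encoder(X, X_lengths)
print(out.shape)  # torch.Size([3, 128]) -- 2 directions * hidden_size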