import torch
import torch.nn as nn


class Encoder(nn.Module):
    def __init__(self, emb_matrix, hidden_size=64):
        super(Encoder, self).__init__()

        self.embedding, num_embeddings, embedding_dim = self.create_emb_layer(emb_matrix)
        self.hidden_size = hidden_size
        # Bidirectional GRU over the embedded sequence; batch_first keeps
        # inputs as (batch, seq_len, embedding_dim).
        self.gru = nn.GRU(embedding_dim, hidden_size, num_layers=1,
                          bidirectional=True, batch_first=True)

    def create_emb_layer(self, weights_matrix, non_trainable=False):
        # Build an embedding layer initialized from a pretrained weight
        # matrix of shape (num_embeddings, embedding_dim).
        num_embeddings, embedding_dim = weights_matrix.shape
        emb_layer = nn.Embedding(num_embeddings, embedding_dim)
        emb_layer.load_state_dict({'weight': torch.tensor(weights_matrix)})
        if non_trainable:
            emb_layer.weight.requires_grad = False

        return emb_layer, num_embeddings, embedding_dim

    def forward(self, X, X_lengths):
        # X: padded batch of token indices, shape (batch, max_seq_len).
        embedded = self.embedding(X)

        # Pack so the GRU skips padded timesteps; enforce_sorted=False lets
        # sequences arrive in any length order (requires PyTorch >= 1.1).
        embedded = torch.nn.utils.rnn.pack_padded_sequence(
            embedded, X_lengths, batch_first=True, enforce_sorted=False)
        output, hn = self.gru(embedded)
        # Unpack only if per-timestep outputs are needed:
        # output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=True)

        # hn has shape (2, batch, hidden_size): the final hidden states of
        # the forward and backward directions. Concatenate them into one
        # (batch, 2 * hidden_size) sequence representation.
        return torch.cat([*hn], dim=-1)
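
A minimal usage sketch. The embedding matrix, vocabulary size, dimensions, and batch values below are illustrative placeholders, not part of the original paste; in practice emb_matrix would hold pretrained vectors (e.g. GloVe or word2vec):

import numpy as np
import torch

# Hypothetical pretrained embeddings: 1000-word vocabulary, 50-dim vectors.
emb_matrix = np.random.rand(1000, 50).astype('float32')
encoder = Encoder(emb_matrix, hidden_size=64)

# Two sequences of lengths 5 and 3, padded with index 0 to the max length.
X = torch.tensor([[4, 8, 15, 16, 23],
                  [42, 7, 99, 0, 0]])
X_lengths = torch.tensor([5, 3])

encoding = encoder(X, X_lengths)
print(encoding.shape)  # torch.Size([2, 128]), i.e. (batch, 2 * hidden_size)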