# Assumes vocab_size, embedding_size, n_classes, pretrained_weights and the train/val arrays are defined earlier.
import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, Bidirectional, LSTM, Dense
from keras.optimizers import RMSprop

model = Sequential()
model.add(Embedding(vocab_size, embedding_size, input_length=55, weights=[pretrained_weights]))
# Note: units=len(X_train) ties the LSTM width to the number of training samples, which is probably unintended.
model.add(Bidirectional(LSTM(units=len(X_train))))
model.add(Dense(n_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(lr=0.0005),
              metrics=['accuracy'])

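# Note: categorical_crossentropy expects one-hot targets; if y_train / y_val
# hold integer class ids, a minimal conversion sketch (using
# keras.utils.to_categorical) would be:
from keras.utils import to_categorical
y_train = to_categorical(y_train, num_classes=n_classes)
y_val = to_categorical(y_val, num_classes=n_classes)
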
model.fit(np.array(X_train), np.array(y_train), epochs=100,
          validation_data=(np.array(X_val), np.array(y_val)))

import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
M = torch.tensor(X_train)  # training data as a PyTorch tensor

# Bidirectional recurrent neural network (many-to-one)
class BiRNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(BiRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True, bidirectional=True)
        self.fc = nn.Linear(hidden_size * 2, num_classes)  # 2 for bidirection

    def forward(self, x):
        # Set initial hidden and cell states
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(device)  # 2 for bidirection
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(device)

        # Forward propagate LSTM
        out, _ = self.lstm(x, (h0, c0))  # out: (batch_size, seq_length, hidden_size*2)

        # Decode the hidden state of the last time step
        out = self.fc(out[:, -1, :])
        return out

# Assumes input_size, hidden_size, num_layers, num_classes and learning_rate
# are set earlier.
model = BiRNN(input_size, hidden_size, num_layers, num_classes).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
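
# A minimal full-batch training-loop sketch, assuming X_train is shaped
# (n, seq_length, input_size), y_train holds integer class ids, and
# num_epochs is defined above:
inputs = torch.tensor(X_train, dtype=torch.float32).to(device)
labels = torch.tensor(y_train, dtype=torch.long).to(device)

for epoch in range(num_epochs):
    outputs = model(inputs)            # logits, shape (n, num_classes)
    loss = criterion(outputs, labels)

    optimizer.zero_grad()              # clear gradients from the last step
    loss.backward()                    # backpropagate
    optimizer.step()                   # update the weights
    print('Epoch {}: loss = {:.4f}'.format(epoch + 1, loss.item()))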