Advertisement
Not a member of Pastebin yet?
Sign Up —
it unlocks many cool features!
class RNN(nn.Module):
    """LSTM-based regressor mapping a ``(batch_size, seq_len)`` input to one
    scalar per sample.

    Pipeline: LSTM -> Linear(hidden_size1, output_size) -> Tanh -> flatten
    -> Linear(hidden_size2, final_layer) -> Tanh -> Linear(final_layer, 1).

    NOTE(review): the flatten between dense1 and dense2 only shape-checks when
    ``output_size == 1`` and the sequence length equals ``hidden_size2`` —
    confirm against the training script that constructs this model.
    """

    def __init__(self, input_size, hidden_size1, hidden_size2, final_layer,
                 num_layers, output_size, batch_size):
        super().__init__()
        self.hidden_size1 = hidden_size1
        self.hidden_size2 = hidden_size2
        self.batch_size = batch_size
        self.num_layers = num_layers
        # Fix: layers were hard-coded with .cuda(), which breaks CPU-only use
        # and is redundant — callers should place the module with
        # model.cuda() / model.to(device), which nn.Module already supports.
        self.lstm = nn.LSTM(input_size, hidden_size1, num_layers,
                            batch_first=True)
        self.dense1 = nn.Linear(hidden_size1, output_size)
        self.tanh1 = nn.Tanh()
        self.dense2 = nn.Linear(hidden_size2, final_layer)
        self.tanh2 = nn.Tanh()
        self.dense3 = nn.Linear(final_layer, 1)

    def forward(self, x):
        """Run the network on ``x`` of shape ``(batch_size, seq_len)``.

        Returns a 1-D tensor of shape ``(batch_size,)``.
        """
        # Zero initial hidden/cell states, allocated on the input's device
        # (fix: previously depended on an undefined global `device`).
        h0 = torch.zeros(self.num_layers, self.batch_size, self.hidden_size1,
                         device=x.device)
        c0 = torch.zeros(self.num_layers, self.batch_size, self.hidden_size1,
                         device=x.device)
        # Treat each scalar in the sequence as a single-feature time step.
        x = x.view(self.batch_size, x.shape[1], 1)
        # out: (batch_size, seq_len, hidden_size1)
        out, _ = self.lstm(x, (h0, c0))
        # Per-time-step projection: (batch_size, seq_len, output_size).
        out = self.tanh1(self.dense1(out))
        # Flatten to (batch_size, seq_len); requires output_size == 1.
        # Fix: was `out.view(batch_size, ...)` — a global, not the attribute.
        out = out.view(self.batch_size, x.shape[1])
        out = self.tanh2(self.dense2(out))
        # Final head: (batch_size, 1) -> (batch_size,).
        out = self.dense3(out)
        return out.view(self.batch_size)
Advertisement
Add Comment
Please sign in to add a comment
Advertisement