import torch
import torch.nn as nn

torch.manual_seed(1)


class LSTM(nn.Module):
    """LSTM encoder: mean-pools the outputs over time when bidirectional,
    otherwise returns the final hidden state of the last layer."""

    def __init__(self, config):
        super(LSTM, self).__init__()
        self.config = config
        self.lstm = nn.LSTM(input_size=config["input_size"],
                            hidden_size=config["hidden_size"],
                            num_layers=config["num_layers"],
                            dropout=config["dropout"],
                            bidirectional=config["bidirectional"],
                            batch_first=True)

    def forward(self, inputs, lengths=None):
        # inputs: (batch, seq_len, input_size)
        batch_size = inputs.size(0)
        packed = lengths is not None
        if packed:
            inputs = nn.utils.rnn.pack_padded_sequence(inputs, lengths, batch_first=True)

        # Initial states: (num_layers * num_directions, batch, hidden_size)
        num_directions = 2 if self.config["bidirectional"] else 1
        state_shape = (self.config["num_layers"] * num_directions,
                       batch_size, self.config["hidden_size"])
        data = inputs.data if packed else inputs
        h0 = c0 = data.new_zeros(state_shape)

        outputs, (ht, ct) = self.lstm(inputs, (h0, c0))

        if self.config["bidirectional"]:
            if packed:
                outputs, o_lengths = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
                o_lengths = o_lengths.float().unsqueeze(1).to(outputs.device)
            else:
                o_lengths = outputs.new_full((batch_size, 1), outputs.size(1))
            # Mean-pool over time; padded positions are zero after unpacking.
            return torch.sum(outputs, dim=1) / o_lengths

        # Unidirectional: final hidden state of the last layer.
        return ht[-1]
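

# Minimal usage sketch (the hyperparameter values below are assumed, not from
# the original paste): builds a small bidirectional encoder and runs one
# forward pass on a batch of padded sequences.
if __name__ == "__main__":
    config = {"input_size": 50, "hidden_size": 32, "num_layers": 1,
              "dropout": 0.0, "bidirectional": True}
    model = LSTM(config)

    # Batch of 3 padded sequences, max length 7. Lengths must be sorted in
    # descending order for pack_padded_sequence with its default settings.
    x = torch.randn(3, 7, 50)
    lengths = torch.tensor([7, 5, 2])

    encoded = model(x, lengths)
    print(encoded.shape)  # (3, 2 * hidden_size) = (3, 64)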