import torch
import torch.nn as nn
from torch.autograd import Variable  # deprecated in recent PyTorch; the raw tensor can be passed directly
torch.backends.cudnn.enabled = True

seq_size = 2
hidden_size = 3
inp_size = 4
batch_size = 5
num_layers = 2
bias = False
batch_first = False
dropout = 0.0
bidirectional = True

print("seq_size", seq_size)
print("hidden_size", hidden_size)
print("inp_size", inp_size)
print("batch_size", batch_size)
print("num_layers", num_layers)

# Random input of shape (seq_len, batch, input_size), since batch_first=False.
inp = torch.cuda.FloatTensor(seq_size, batch_size, inp_size).uniform_()

# Bidirectional, 2-layer LSTM without biases, run on the GPU through cuDNN.
pytorch_lstm = nn.LSTM(inp_size, hidden_size, num_layers, bias,
                       batch_first, dropout, bidirectional).cuda()

pyt_out, (pyt_hx, pyt_cx) = pytorch_lstm(Variable(inp))
pyt_out.sum().backward()

# Print the name and shape of every LSTM parameter, layer by layer.
# (With bias=False, _all_weights lists only the weight_ih/weight_hh entries.)
for layer in pytorch_lstm._all_weights:
    for weight in layer:
        print(weight, getattr(pytorch_lstm, weight).size())