import torch
import torch.nn as nn
from torch.autograd import Variable

torch.backends.cudnn.enabled = True

seq_size = 2
hidden_size = 3
inp_size = 4
batch_size = 5

num_layers = 2
bias = False
batch_first = False
dropout = 0.0
bidirectional = True

print("seq_size", seq_size)
print("hidden_size", hidden_size)
print("inp_size", inp_size)
print("batch_size", batch_size)
print("num_layers", num_layers)

# Random input of shape (seq_len, batch, input_size) on the GPU.
inp = torch.cuda.FloatTensor(seq_size, batch_size, inp_size).uniform_()

pytorch_lstm = nn.LSTM(inp_size, hidden_size, num_layers, bias,
                       batch_first, dropout, bidirectional).cuda()

# Forward pass through the cuDNN-backed LSTM.
pyt_out, (pyt_hx, pyt_cx) = pytorch_lstm(Variable(inp))

# Backprop a scalar loss to populate the weight gradients.
pyt_out.sum().backward()

# List every weight tensor the LSTM holds, per layer and direction.
# With bias=False, only the weight_ih_l* and weight_hh_l* entries appear.
for layer in pytorch_lstm._all_weights:
    for weight in layer:
        print(weight, getattr(pytorch_lstm, weight).size())
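
As a sanity check (an addition, not part of the original paste), the shapes coming out of the forward pass can be asserted directly: with bidirectional=True the output's last dimension is 2 * hidden_size, and the hidden and cell states carry num_layers * num_directions layers. A minimal sketch, assuming the script above has already run:

# Sketch of a shape check, reusing the variables defined above.
num_directions = 2 if bidirectional else 1
assert pyt_out.size() == (seq_size, batch_size, hidden_size * num_directions)
assert pyt_hx.size() == (num_layers * num_directions, batch_size, hidden_size)
assert pyt_cx.size() == (num_layers * num_directions, batch_size, hidden_size)

# After backward(), every weight should carry a gradient of matching shape.
for layer in pytorch_lstm._all_weights:
    for weight in layer:
        param = getattr(pytorch_lstm, weight)
        print(weight, param.grad.size())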