import torch
import torch.nn as nn

use_cuda = torch.cuda.is_available()  # assumed definition; the paste uses use_cuda without defining it

if use_cuda:
    encoder = encoder.cuda()
    decoder = decoder.cuda()

# Split every input along dim 0 (the batch dimension) across the available GPUs.
encoder = nn.DataParallel(encoder, dim=0)
decoder = nn.DataParallel(decoder, dim=0)

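For context (not part of the original paste): nn.DataParallel with dim=0 scatters each positional input along the batch dimension, runs one replica of the wrapped module per GPU, and then gathers the replicas' outputs back onto a single device. A minimal sketch of that split, using a hypothetical Echo module:

class Echo(nn.Module):
    def forward(self, x):
        # Each replica sees only its slice of the batch.
        print('replica got a slice of size', x.size(0))
        return x

if use_cuda and torch.cuda.device_count() > 1:
    m = nn.DataParallel(Echo().cuda(), dim=0)
    y = m(torch.autograd.Variable(torch.zeros(4, 8).cuda()))
    # With two GPUs each replica prints 2, and y is gathered back to size (4, 8).
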
class EncoderRNN(nn.Module):
    def __init__(self, vocab_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True)

    def forward(self, input_batch, input_batch_length, hidden):
        # Debug prints; their output is reproduced at the bottom of this paste.
        print(input_batch)
        print(input_batch_length)
        print(hidden)
        embedded = self.embedding(input_batch)
        # Pack the padded batch so the GRU skips the padding positions.
        packed_input = nn.utils.rnn.pack_padded_sequence(
            embedded, input_batch_length.cpu().numpy(), batch_first=True)
        output, hidden = self.gru(packed_input, hidden)
        # Note: output is still a PackedSequence at this point.
        return output, hidden

    def init_hidden(self, batch_size):
        # One layer, one direction: the initial hidden state is (1, batch, hidden).
        result = torch.autograd.Variable(torch.zeros(1, batch_size, self.hidden_size))

        if use_cuda:
            return result.cuda()
        else:
            return result

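A hedged sketch of driving this encoder for one batch, without the DataParallel wrapper; the shapes mirror the debug printout at the bottom (batch 4, padded length 2687, hidden size 256), and vocab_size is an assumption since the paste never shows it:

vocab_size = 20000  # hypothetical; not given anywhere in the paste
encoder = EncoderRNN(vocab_size, hidden_size=256)
if use_cuda:
    encoder = encoder.cuda()

batch = torch.autograd.Variable(torch.LongTensor(4, 2687).zero_())  # padded token ids
lengths = torch.LongTensor([1844, 1507, 1219, 1021])                # true lengths, sorted descending
hidden = encoder.init_hidden(batch_size=4)                          # zeros of shape (1, 4, 256)
if use_cuda:
    batch, lengths = batch.cuda(), lengths.cuda()
output, hidden = encoder(batch, lengths, hidden)                    # output is a PackedSequence

A bare module call like this has no gather step, so it does not trip the AssertionError below; that only appears once the module is wrapped in nn.DataParallel.
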
Traceback (most recent call last):
  File "train.py", line 156, in <module>
    train_iteration(encoder, decoder, fileDataSet)
  File "train.py", line 122, in train_iteration
    target_indices, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion)
  File "train.py", line 49, in train
    encoder_output, encoder_hidden = encoder(input_batch, input_batch_length, encoder_hidden)
  File "/home/cjunjie/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 357, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/cjunjie/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 74, in forward
    return self.gather(outputs, self.output_device)
  File "/home/cjunjie/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 86, in gather
    return gather(outputs, output_device, dim=self.dim)
  File "/home/cjunjie/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 65, in gather
    return gather_map(outputs)
  File "/home/cjunjie/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 60, in gather_map
    return type(out)(map(gather_map, zip(*outputs)))
  File "/home/cjunjie/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 60, in gather_map
    return type(out)(map(gather_map, zip(*outputs)))
  File "/home/cjunjie/anaconda3/lib/python3.6/site-packages/torch/nn/utils/rnn.py", line 39, in __new__
    return super(PackedSequence, cls).__new__(cls, *args[0])
  File "/home/cjunjie/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 57, in gather_map
    return Gather.apply(target_device, dim, *outputs)
  File "/home/cjunjie/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/_functions.py", line 58, in forward
    assert all(map(lambda i: i.is_cuda, inputs))
AssertionError

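Reading the traceback: the inputs are fine (the debug printout below shows all three already resident on GPU 0), and the failure happens while DataParallel gathers the replicas' outputs. gather_map recurses into the PackedSequence that forward returns (note the PackedSequence.__new__ frame), and Gather.apply asserts that every piece it merges is a CUDA tensor, which the packed sequence's fields do not satisfy. A common workaround, sketched here as an assumption rather than a confirmed fix, is to unpack inside forward so each replica returns plain padded tensors that gather can merge:

def forward(self, input_batch, input_batch_length, hidden):
    embedded = self.embedding(input_batch)
    packed_input = nn.utils.rnn.pack_padded_sequence(
        embedded, input_batch_length.cpu().numpy(), batch_first=True)
    packed_output, hidden = self.gru(packed_input, hidden)
    # Unpack so the return values are ordinary tensors, not a PackedSequence.
    output, _ = nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True)
    return output, hidden

One caveat: each replica pads back to the longest sequence in its own slice, so gathering can still fail on mismatched lengths; later PyTorch releases add a total_length argument to pad_packed_sequence for exactly this DataParallel case.
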
 2.0000e+00  6.2900e+02  5.4000e+01  ...   0.0000e+00  0.0000e+00  0.0000e+00
 2.0000e+00  1.6759e+04  6.0000e+00  ...   0.0000e+00  0.0000e+00  0.0000e+00
 2.0000e+00  7.2000e+01  3.3500e+02  ...   0.0000e+00  0.0000e+00  0.0000e+00
 2.0000e+00  5.4000e+01  1.2900e+02  ...   0.0000e+00  0.0000e+00  0.0000e+00
[torch.cuda.LongTensor of size (4,2687) (GPU 0)]

 1844
 1507
 1219
 1021
[torch.cuda.LongTensor of size (4,) (GPU 0)]

( 0 ,.,.) =
   0   0   0  ...    0   0   0
   0   0   0  ...    0   0   0
   0   0   0  ...    0   0   0
   0   0   0  ...    0   0   0
[torch.cuda.FloatTensor of size (1,4,256) (GPU 0)]