Traceback (most recent call last):
  File "/current_project/robo-planet/torch_planet/main.py", line 117, in <module>
    transition_model = TransitionModel(args.belief_size, args.state_size, env.action_size, args.hidden_size, args.embedding_size, args.activation_function).to(device=args.device)
  File "/home/maximecb/.local/lib/python3.6/site-packages/torch/jit/__init__.py", line 952, in init_then_register
    _create_methods_from_stubs(self, methods)
  File "/home/maximecb/.local/lib/python3.6/site-packages/torch/jit/__init__.py", line 913, in _create_methods_from_stubs
    self._create_methods(defs, rcbs, defaults)
RuntimeError:
arguments for call are not valid:

for operator aten::mul(Tensor self, Tensor other) -> Tensor:
expected a value of type Tensor for argument 'self' but found Tensor[]
@jit.script_method
def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
    T = actions.size(0) + 1
    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
    ~~~~~~~~~~~~~~ <--- HERE
    beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
    # Loop over time sequence
    for t in range(T - 1):
        _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
        _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
        # Compute belief (deterministic hidden state)
        hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
        beliefs[t + 1] = self.rnn(hidden, beliefs[t])
        # Compute state prior by applying transition dynamics
for operator aten::mul(Tensor self, Scalar other) -> Tensor:
expected a value of type Tensor for argument 'self' but found Tensor[]
@jit.script_method
def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
    T = actions.size(0) + 1
    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
    ~~~~~~~~~~~~~~ <--- HERE
    beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
    # Loop over time sequence
    for t in range(T - 1):
        _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
        _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
        # Compute belief (deterministic hidden state)
        hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
        beliefs[t + 1] = self.rnn(hidden, beliefs[t])
        # Compute state prior by applying transition dynamics
for operator aten::mul(Tensor self, Tensor other, *, Tensor out) -> Tensor:
expected a value of type Tensor for argument 'self' but found Tensor[]
@jit.script_method
def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
    T = actions.size(0) + 1
    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
    ~~~~~~~~~~~~~~ <--- HERE
    beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
    # Loop over time sequence
    for t in range(T - 1):
        _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
        _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
        # Compute belief (deterministic hidden state)
        hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
        beliefs[t + 1] = self.rnn(hidden, beliefs[t])
        # Compute state prior by applying transition dynamics
for operator aten::mul(int a, int b) -> int:
expected a value of type int for argument 'a' but found Tensor[]
@jit.script_method
def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
    T = actions.size(0) + 1
    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
    ~~~~~~~~~~~~~~ <--- HERE
    beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
    # Loop over time sequence
    for t in range(T - 1):
        _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
        _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
        # Compute belief (deterministic hidden state)
        hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
        beliefs[t + 1] = self.rnn(hidden, beliefs[t])
        # Compute state prior by applying transition dynamics
for operator aten::mul(float a, float b) -> float:
expected a value of type float for argument 'a' but found Tensor[]
@jit.script_method
def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
    T = actions.size(0) + 1
    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
    ~~~~~~~~~~~~~~ <--- HERE
    beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
    # Loop over time sequence
    for t in range(T - 1):
        _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
        _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
        # Compute belief (deterministic hidden state)
        hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
        beliefs[t + 1] = self.rnn(hidden, beliefs[t])
        # Compute state prior by applying transition dynamics
for operator aten::mul(int a, float b) -> float:
expected a value of type int for argument 'a' but found Tensor[]
@jit.script_method
def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
    T = actions.size(0) + 1
    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
    ~~~~~~~~~~~~~~ <--- HERE
    beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
    # Loop over time sequence
    for t in range(T - 1):
        _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
        _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
        # Compute belief (deterministic hidden state)
        hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
        beliefs[t + 1] = self.rnn(hidden, beliefs[t])
        # Compute state prior by applying transition dynamics
for operator aten::mul(float a, int b) -> float:
expected a value of type float for argument 'a' but found Tensor[]
@jit.script_method
def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
    T = actions.size(0) + 1
    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
    ~~~~~~~~~~~~~~ <--- HERE
    beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
    # Loop over time sequence
    for t in range(T - 1):
        _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
        _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
        # Compute belief (deterministic hidden state)
        hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
        beliefs[t + 1] = self.rnn(hidden, beliefs[t])
        # Compute state prior by applying transition dynamics
for operator mul(float a, Tensor b) -> Tensor:
expected a value of type float for argument 'a' but found Tensor[]
@jit.script_method
def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
    T = actions.size(0) + 1
    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
    ~~~~~~~~~~~~~~ <--- HERE
    beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
    # Loop over time sequence
    for t in range(T - 1):
        _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
        _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
        # Compute belief (deterministic hidden state)
        hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
        beliefs[t + 1] = self.rnn(hidden, beliefs[t])
        # Compute state prior by applying transition dynamics
for operator mul(int a, Tensor b) -> Tensor:
expected a value of type int for argument 'a' but found Tensor[]
@jit.script_method
def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
    T = actions.size(0) + 1
    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
    ~~~~~~~~~~~~~~ <--- HERE
    beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
    # Loop over time sequence
    for t in range(T - 1):
        _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
        _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
        # Compute belief (deterministic hidden state)
        hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
        beliefs[t + 1] = self.rnn(hidden, beliefs[t])
        # Compute state prior by applying transition dynamics
for call at:
@jit.script_method
def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
    T = actions.size(0) + 1
    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
    ~~~~~~~~~~~~~~~~~~~~ <--- HERE
    beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
    # Loop over time sequence
    for t in range(T - 1):
        _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
        _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
        # Compute belief (deterministic hidden state)
        hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
        beliefs[t + 1] = self.rnn(hidden, beliefs[t])
        # Compute state prior by applying transition dynamics
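
What every failed candidate has in common is the left-hand operand: the compiler is typing the * in [torch.empty(0)] * T against every mul overload it knows, and none of them accepts a Tensor[] (a list of tensors), because this TorchScript build does not support list repetition. Below is a minimal sketch of one possible workaround, assuming the goal is only to pre-size the seven per-timestep lists; it is not the repository's actual fix, and the helper name init_placeholders is made up for illustration.

import torch
from typing import List

def init_placeholders(T: int) -> List[torch.Tensor]:
    # Build T placeholder tensors without list repetition (list * int),
    # which this TorchScript version cannot resolve against any mul overload.
    xs = [torch.empty(0)]          # seed element fixes the element type as Tensor
    for _ in range(T - 1):
        xs.append(torch.empty(0))  # plain append is supported by the scripting compiler
    return xs

scripted = torch.jit.script(init_placeholders)  # compiles without the aten::mul error
beliefs = scripted(5)                           # e.g. T = 5 placeholders
print(len(beliefs))                             # -> 5

Inside the scripted forward() above, the offending assignment would then become seven calls such as beliefs = init_placeholders(T), with the helper itself scripted so the method can call it. Alternatively, later PyTorch releases added list repetition to TorchScript, so upgrading may let the original line compile unchanged.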