Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- Traceback (most recent call last):
- File "/current_project/robo-planet/torch_planet/main.py", line 117, in <module>
- transition_model = TransitionModel(args.belief_size, args.state_size, env.action_size, args.hidden_size, args.embedding_size, args.activation_function).to(device=args.device)
- File "/home/maximecb/.local/lib/python3.6/site-packages/torch/jit/__init__.py", line 952, in init_then_register
- _create_methods_from_stubs(self, methods)
- File "/home/maximecb/.local/lib/python3.6/site-packages/torch/jit/__init__.py", line 913, in _create_methods_from_stubs
- self._create_methods(defs, rcbs, defaults)
- RuntimeError:
- arguments for call are not valid:
- for operator aten::mul(Tensor self, Tensor other) -> Tensor:
- expected a value of type Tensor for argument 'self' but found Tensor[]
- @jit.script_method
- def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
- # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
- T = actions.size(0) + 1
- beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
- ~~~~~~~~~~~~~~ <--- HERE
- beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
- # Loop over time sequence
- for t in range(T - 1):
- _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
- _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
- # Compute belief (deterministic hidden state)
- hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
- beliefs[t + 1] = self.rnn(hidden, beliefs[t])
- # Compute state prior by applying transition dynamics
- for operator aten::mul(Tensor self, Scalar other) -> Tensor:
- expected a value of type Tensor for argument 'self' but found Tensor[]
- @jit.script_method
- def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
- # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
- T = actions.size(0) + 1
- beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
- ~~~~~~~~~~~~~~ <--- HERE
- beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
- # Loop over time sequence
- for t in range(T - 1):
- _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
- _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
- # Compute belief (deterministic hidden state)
- hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
- beliefs[t + 1] = self.rnn(hidden, beliefs[t])
- # Compute state prior by applying transition dynamics
- for operator aten::mul(Tensor self, Tensor other, *, Tensor out) -> Tensor:
- expected a value of type Tensor for argument 'self' but found Tensor[]
- @jit.script_method
- def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
- # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
- T = actions.size(0) + 1
- beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
- ~~~~~~~~~~~~~~ <--- HERE
- beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
- # Loop over time sequence
- for t in range(T - 1):
- _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
- _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
- # Compute belief (deterministic hidden state)
- hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
- beliefs[t + 1] = self.rnn(hidden, beliefs[t])
- # Compute state prior by applying transition dynamics
- for operator aten::mul(int a, int b) -> int:
- expected a value of type int for argument 'a' but found Tensor[]
- @jit.script_method
- def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
- # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
- T = actions.size(0) + 1
- beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
- ~~~~~~~~~~~~~~ <--- HERE
- beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
- # Loop over time sequence
- for t in range(T - 1):
- _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
- _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
- # Compute belief (deterministic hidden state)
- hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
- beliefs[t + 1] = self.rnn(hidden, beliefs[t])
- # Compute state prior by applying transition dynamics
- for operator aten::mul(float a, float b) -> float:
- expected a value of type float for argument 'a' but found Tensor[]
- @jit.script_method
- def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
- # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
- T = actions.size(0) + 1
- beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
- ~~~~~~~~~~~~~~ <--- HERE
- beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
- # Loop over time sequence
- for t in range(T - 1):
- _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
- _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
- # Compute belief (deterministic hidden state)
- hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
- beliefs[t + 1] = self.rnn(hidden, beliefs[t])
- # Compute state prior by applying transition dynamics
- for operator aten::mul(int a, float b) -> float:
- expected a value of type int for argument 'a' but found Tensor[]
- @jit.script_method
- def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
- # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
- T = actions.size(0) + 1
- beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
- ~~~~~~~~~~~~~~ <--- HERE
- beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
- # Loop over time sequence
- for t in range(T - 1):
- _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
- _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
- # Compute belief (deterministic hidden state)
- hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
- beliefs[t + 1] = self.rnn(hidden, beliefs[t])
- # Compute state prior by applying transition dynamics
- for operator aten::mul(float a, int b) -> float:
- expected a value of type float for argument 'a' but found Tensor[]
- @jit.script_method
- def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
- # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
- T = actions.size(0) + 1
- beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
- ~~~~~~~~~~~~~~ <--- HERE
- beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
- # Loop over time sequence
- for t in range(T - 1):
- _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
- _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
- # Compute belief (deterministic hidden state)
- hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
- beliefs[t + 1] = self.rnn(hidden, beliefs[t])
- # Compute state prior by applying transition dynamics
- for operator mul(float a, Tensor b) -> Tensor:
- expected a value of type float for argument 'a' but found Tensor[]
- @jit.script_method
- def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
- # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
- T = actions.size(0) + 1
- beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
- ~~~~~~~~~~~~~~ <--- HERE
- beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
- # Loop over time sequence
- for t in range(T - 1):
- _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
- _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
- # Compute belief (deterministic hidden state)
- hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
- beliefs[t + 1] = self.rnn(hidden, beliefs[t])
- # Compute state prior by applying transition dynamics
- for operator mul(int a, Tensor b) -> Tensor:
- expected a value of type int for argument 'a' but found Tensor[]
- @jit.script_method
- def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
- # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
- T = actions.size(0) + 1
- beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
- ~~~~~~~~~~~~~~ <--- HERE
- beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
- # Loop over time sequence
- for t in range(T - 1):
- _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
- _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
- # Compute belief (deterministic hidden state)
- hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
- beliefs[t + 1] = self.rnn(hidden, beliefs[t])
- # Compute state prior by applying transition dynamics
- for call at:
- @jit.script_method
- def forward(self, prev_state:torch.Tensor, actions:torch.Tensor, prev_belief:torch.Tensor, observations:Optional[torch.Tensor]=None, nonterminals:Optional[torch.Tensor]=None) -> List[torch.Tensor]:
- # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
- T = actions.size(0) + 1
- beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T
- ~~~~~~~~~~~~~~~~~~~~ <--- HERE
- beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
- # Loop over time sequence
- for t in range(T - 1):
- _state = prior_states[t] if observations is None else posterior_states[t] # Select appropriate previous state
- _state = _state if nonterminals is None else _state * nonterminals[t] # Mask if previous transition was terminal
- # Compute belief (deterministic hidden state)
- hidden = self.act_fn(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
- beliefs[t + 1] = self.rnn(hidden, beliefs[t])
- # Compute state prior by applying transition dynamics
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement