---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-73-c3dd6f60d80f> in <module>
     23
     24 # Evaluate untrained policy
---> 25 evaluations= [evaluate_policy(local_env, policy)]
     26
     27 total_timesteps = 0

/duckietown/utils/helpers.py in evaluate_policy(env, policy, eval_episodes, max_timesteps)
    216     step = 0
    217     while not done and step < max_timesteps:
--> 218         action = policy.predict(np.array(obs))
    219         obs, reward, done, _ = env.step(action)
    220         avg_reward += reward

<ipython-input-70-08969eb656c1> in predict(self, state)
     24         state = torch.FloatTensor(np.expand_dims(state, axis=0)).to(device)
     25         print(state)
---> 26         return self.actor(state).cpu().data.numpy().flatten()  # Make back into column?
     27
     28     def train(self, replay_buffer, iterations, batch_size=64, discount=0.99):

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

<ipython-input-61-a7209547df79> in forward(self, x)
     34         x = x.view(x.size(0), -1)  # flatten, does not modify size
     35         x = self.dropout(x)  # Make some elements zero (regularization), does not modify size.
---> 36         x = self.relu(self.lin1(x))  # Therefore this must accept 32. BUG self.lr changes to self.relu
     37
     38         x = self.lin2(x)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/linear.py in forward(self, input)
     65     @weak_script_method
     66     def forward(self, input):
---> 67         return F.linear(input, self.weight, self.bias)
     68
     69     def extra_repr(self):

/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
   1350     if input.dim() == 2 and bias is not None:
   1351         # fused op is marginally faster
-> 1352         ret = torch.addmm(torch.jit._unwrap_optional(bias), input, weight.t())
   1353     else:
   1354         output = input.matmul(weight.t())

RuntimeError: size mismatch, m1: [1 x 31968], m2: [32 x 100] at /pytorch/aten/src/TH/generic/THTensorMath.cpp:940
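
The last line is the whole story: F.linear received an input m1 of shape [1 x 31968] (one flattened observation), while m2 is [32 x 100], the transposed weight of self.lin1, meaning the layer was declared as nn.Linear(32, 100). The comment at line 36 ("this must accept 32") is the wrong assumption: after x.view(x.size(0), -1) each sample has 31968 features, so lin1 needs in_features=31968. Rather than hard-coding that number, the flattened size can be measured once in __init__ with a dummy forward pass. Below is a minimal sketch of that fix; the conv stack and the obs_shape are placeholders (the paste only shows the tail of forward), chosen so the numbers happen to reproduce the 31968 in the error:

    import torch
    import torch.nn as nn

    class Actor(nn.Module):
        def __init__(self, obs_shape, action_dim):
            super().__init__()
            # Hypothetical conv stack -- the paste does not show the real layers.
            self.conv = nn.Sequential(
                nn.Conv2d(obs_shape[0], 32, kernel_size=5, stride=2), nn.ReLU(),
                nn.Conv2d(32, 32, kernel_size=5, stride=2), nn.ReLU(),
            )
            self.dropout = nn.Dropout(p=0.5)
            self.relu = nn.ReLU()
            # Measure the flattened width with a dummy forward pass instead of
            # hard-coding it; the hard-coded 32 is what triggers the crash above.
            with torch.no_grad():
                flat = self.conv(torch.zeros(1, *obs_shape)).view(1, -1).size(1)
            self.lin1 = nn.Linear(flat, 100)   # was effectively nn.Linear(32, 100)
            self.lin2 = nn.Linear(100, action_dim)

        def forward(self, x):
            x = self.conv(x)
            x = x.view(x.size(0), -1)          # flatten, keeps the batch dim
            x = self.dropout(x)
            x = self.relu(self.lin1(x))
            return self.lin2(x)

    # Illustrative values: a 3-channel 120x160 observation and a 2-dim action.
    # With this stack, flat == 32 * 27 * 37 == 31968, matching the traceback.
    actor = Actor(obs_shape=(3, 120, 160), action_dim=2)
    out = actor(torch.zeros(1, 3, 120, 160))   # shape [1, 2], no size mismatch

With lin1 sized from the measured flat width, addmm multiplies [1 x 31968] against a [31968 x 100] transposed weight and the mismatch disappears; the dummy-pass trick also keeps the layer correct if the observation resolution or conv strides change later.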