Advertisement
Guest User

Untitled

a guest
Oct 18th, 2019
122
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 2.87 KB | None | 0 0
  1. ---------------------------------------------------------------------------
  2. RuntimeError Traceback (most recent call last)
  3. <ipython-input-73-c3dd6f60d80f> in <module>
  4. 23
  5. 24 # Evaluate untrained policy
  6. ---> 25 evaluations= [evaluate_policy(local_env, policy)]
  7. 26
  8. 27 total_timesteps = 0
  9.  
  10. /duckietown/utils/helpers.py in evaluate_policy(env, policy, eval_episodes, max_timesteps)
  11. 216 step = 0
  12. 217 while not done and step < max_timesteps:
  13. --> 218 action = policy.predict(np.array(obs))
  14. 219 obs, reward, done, _ = env.step(action)
  15. 220 avg_reward += reward
  16.  
  17. <ipython-input-70-08969eb656c1> in predict(self, state)
  18. 24 state = torch.FloatTensor(np.expand_dims(state, axis=0)).to(device)
  19. 25 print(state)
  20. ---> 26 return self.actor(state).cpu().data.numpy().flatten() # Make back into column?
  21. 27
  22. 28 def train(self, replay_buffer, iterations, batch_size=64, discount=0.99):
  23.  
  24. /usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
  25. 487 result = self._slow_forward(*input, **kwargs)
  26. 488 else:
  27. --> 489 result = self.forward(*input, **kwargs)
  28. 490 for hook in self._forward_hooks.values():
  29. 491 hook_result = hook(self, input, result)
  30.  
  31. <ipython-input-61-a7209547df79> in forward(self, x)
  32. 34 x = x.view(x.size(0), -1) # flatten, does not modify size
  33. 35 x = self.dropout(x) # Make some elements zero (regularization), does not modify size.
  34. ---> 36 x = self.relu(self.lin1(x)) # Therefore this must accept 32. BUG self.lr changes to self.relu
  35. 37
  36. 38 x = self.lin2(x)
  37.  
  38. /usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
  39. 487 result = self._slow_forward(*input, **kwargs)
  40. 488 else:
  41. --> 489 result = self.forward(*input, **kwargs)
  42. 490 for hook in self._forward_hooks.values():
  43. 491 hook_result = hook(self, input, result)
  44.  
  45. /usr/local/lib/python3.6/dist-packages/torch/nn/modules/linear.py in forward(self, input)
  46. 65 @weak_script_method
  47. 66 def forward(self, input):
  48. ---> 67 return F.linear(input, self.weight, self.bias)
  49. 68
  50. 69 def extra_repr(self):
  51.  
  52. /usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
  53. 1350 if input.dim() == 2 and bias is not None:
  54. 1351 # fused op is marginally faster
  55. -> 1352 ret = torch.addmm(torch.jit._unwrap_optional(bias), input, weight.t())
  56. 1353 else:
  57. 1354 output = input.matmul(weight.t())
  58.  
  59. RuntimeError: size mismatch, m1: [1 x 31968], m2: [32 x 100] at /pytorch/aten/src/TH/generic/THTensorMath.cpp:940
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement