Untitled
a guest, Oct 20th, 2017
def forward(self, x, y, n):
    """
    x: the input vector representing the input signal
    y: the target vector, the true output
    n: the length of vectors ``x`` and ``y``
    """
    o = self.propagateSignal(x)
    loss = self.objective.apply(y, o)/n
    # L2 penalty: squared Frobenius norm of every layer's weight matrix,
    # consistent with the 2*lambda*W term added to nablaW in backward()
    penalty = self._lambda*np.sum([np.linalg.norm(layer.W)**2 for layer in self.nnet])
    return loss + penalty

def propagateSignal(self, x):
    _h = x
    for layer in self.nnet:
        _h = layer.apply(_h)
    return _h

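The methods above rely on layer and objective objects that the paste never defines. The following is a minimal sketch of the interface they appear to assume, reconstructed from the attribute accesses (apply() caching the input in _h, the pre-activation in a and the output in h; an activation exposing apply() and dh(); an objective exposing apply() and gradient()). The class names and the sigmoid/squared-error choices are assumptions, not part of the original code.

import numpy as np

class Sigmoid:
    def apply(self, a):
        return 1.0/(1.0 + np.exp(-a))
    def dh(self, a):
        # derivative of the sigmoid output w.r.t. the pre-activation a
        s = self.apply(a)
        return s*(1.0 - s)

class Layer:
    def __init__(self, n_in, n_out, activation):
        self.W = 0.1*np.random.randn(n_out, n_in)
        self.b = np.zeros(n_out)
        self.activation = activation
    def apply(self, x):
        self._h = x                              # input, read back by backward()
        self.a = np.dot(self.W, x) + self.b      # pre-nonlinearity activation
        self.h = self.activation.apply(self.a)   # layer output
        return self.h

class SquaredError:
    def apply(self, y, o):
        return 0.5*np.sum((o - y)**2)
    def gradient(self, y, o):
        return o - y                             # derivative w.r.t. the output o
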
def backward(self, y, n):
    # after the forward computation, compute the gradient on the output layer
    o = self.nnet[-1].h  # last layer's output
    g = self.objective.gradient(y, o)/n
    # walking backward through the layers, reconstruct the error with respect to each weight
    for layer, grad in zip(reversed(self.nnet), reversed(self.gradients)):
        # convert the gradient on the layer's output into a gradient on the
        # pre-nonlinearity activation (element-wise (``Hadamard``) multiplication
        # if the activation f is element-wise)
        g = g*layer.activation.dh(layer.a)
        # accumulate the gradients on biases and weights (including the
        # regularization term, where needed):
        grad.nablab += g
        grad.nablaW += np.outer(g, layer._h) + 2*self._lambda*layer.W
        # propagate the gradient w.r.t. the next lower-level hidden layer's activations
        g = np.dot(layer.W.T, g)

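Since backward() accumulates analytic gradients with +=, a central finite difference of forward() gives a cheap correctness check. The helper below is a hypothetical sketch, not part of the paste: net stands for whatever object carries the methods above together with the nnet and gradients attributes, the accumulators are assumed to be zero when the check starts, and only one entry W[i, j] per layer is probed for brevity.

def gradient_check(net, x, y, n, eps=1e-6, i=0, j=0):
    net.forward(x, y, n)
    net.backward(y, n)                              # analytic gradients land in net.gradients
    for layer, grad in zip(net.nnet, net.gradients):
        w_orig = layer.W[i, j]
        layer.W[i, j] = w_orig + eps
        loss_plus = net.forward(x, y, n)            # objective with W[i, j] nudged up
        layer.W[i, j] = w_orig - eps
        loss_minus = net.forward(x, y, n)           # ... and nudged down
        layer.W[i, j] = w_orig                      # restore the weight
        numeric = (loss_plus - loss_minus)/(2.0*eps)
        print(abs(numeric - grad.nablaW[i, j]))     # should be close to zero
    for grad in net.gradients:
        grad.cleargrad()                            # leave the accumulators clean
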
def descent(self):
    for layer, grad in zip(self.nnet, self.gradients):
        # momentum step: alpha weighs the previous update, eta is the learning rate
        nablaW_new = self.alpha*grad.nablaW_old + self.eta*grad.nablaW
        nablab_new = self.alpha*grad.nablab_old + self.eta*grad.nablab
        layer.W = layer.W - nablaW_new
        layer.b = layer.b - nablab_new
        grad.nablaW_old, grad.nablab_old = nablaW_new, nablab_new
    # clear the accumulated gradients: after each example in online mode,
    # or after a complete pass over the dataset (epoch) in batch mode
    for grad in self.gradients:
        grad.cleargrad()

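To show how forward(), backward() and descent() fit together, here is a hedged sketch of the online (per-example) mode mentioned in descent(): one momentum step, which also clears the accumulated gradients, after every example. The train_online name, the X/Y containers and the choice n = len(y_i) (following forward()'s docstring) are assumptions for illustration.

def train_online(net, X, Y, epochs):
    for epoch in range(epochs):
        total = 0.0
        for x_i, y_i in zip(X, Y):
            n = len(y_i)                          # "the length of the vectors", per forward()
            total += net.forward(x_i, y_i, n)
            net.backward(y_i, n)
            net.descent()                         # momentum update + gradient clearing
        print("epoch", epoch, "mean objective", total/len(X))

In batch mode the two inner calls stay the same, but descent() moves outside the example loop so that the += in backward() accumulates over a complete pass on the dataset before a single update.
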
# variant of forward() in which the L2 penalty also covers the biases
def forward(self, x, y, n):
    """
    x: the input vector representing the input signal
    y: the target vector, the true output
    n: the length of vectors ``x`` and ``y``
    """
    o = self.propagateSignal(x)
    loss = self.objective.apply(y, o)/n
    penalty = self._lambda*np.sum([np.linalg.norm(layer.W)**2 + np.linalg.norm(layer.b)**2 for layer in self.nnet])
    return loss + penalty

# corresponding lines of backward() for the bias-regularized variant:
# only the bias gradient gains the extra 2*lambda*b term
o = self.nnet[-1].h  # last layer's output
g = self.objective.gradient(y, o)/n

g = g*layer.activation.dh(layer.a)

grad.nablab += g + 2*self._lambda*layer.b
grad.nablaW += np.outer(g, layer._h) + 2*self._lambda*layer.W
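A quick standalone check (again not from the paste, and with made-up numbers) that the extra 2*lambda*b term is the gradient of the added lambda*||b||^2 penalty, using a central finite difference on one coordinate:

import numpy as np

lam = 0.01
b = np.array([0.3, -1.2, 0.7])
penalty = lambda v: lam*np.linalg.norm(v)**2      # the bias part of the penalty

eps, j = 1e-6, 1
e = np.zeros_like(b)
e[j] = eps
numeric = (penalty(b + e) - penalty(b - e))/(2*eps)
print(numeric, 2*lam*b[j])                        # both print approximately -0.024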