import numpy as np

sigmoid = lambda x: 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    s = sigmoid(x)
    return s * (1 - s)

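# Quick sanity check on the two helpers above (illustrative inputs):
# sigmoid(0) should be 0.5 and its derivative there 0.25.
print(sigmoid(np.array([0.0, 2.0])))        # ~ [0.5        0.88079708]
print(sigmoid_derivative(np.array([0.0])))  # [0.25]
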
# reshaping an image -> vector
def image2vector(image):
    """
    Argument:
    image -- a numpy array of shape (length, height, depth)

    Returns:
    v -- a vector of shape (length*height*depth, 1)
    """
    return image.reshape(image.shape[0] * image.shape[1] * image.shape[2], 1)

# for a set of images
images2vector = lambda image_set: image_set.reshape(image_set.shape[0], -1).T

# Keep in mind that you can unroll to RGBRGBRGB or RRRGGGBBB.
# It doesn't matter, as long as you're consistent throughout.

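# Illustrative shape check for the reshaping helpers above, assuming a single
# 3x3 RGB image and a hypothetical batch of 10 such images:
single_image = np.zeros((3, 3, 3))
image_batch = np.zeros((10, 3, 3, 3))
print(image2vector(single_image).shape)   # (27, 1)
print(images2vector(image_batch).shape)   # (27, 10)
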
# gradient descent converges faster after normalization
normalizeRows = lambda x: x / np.linalg.norm(x, axis=1, keepdims=True)

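# Sanity check (made-up values): every row should have unit L2 norm afterwards.
print(normalizeRows(np.array([[0.0, 3.0, 4.0],
                              [1.0, 0.0, 0.0]])))
# [[0.  0.6 0.8]
#  [1.  0.  0. ]]
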
# You can think of softmax as a normalizing function used when your algorithm
# needs to classify two or more classes.
def softmax(x):
    x_exp = np.exp(x)
    return x_exp / np.sum(x_exp, axis=1, keepdims=True)

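# Sanity check (illustrative row vector): softmax outputs are positive and each
# row sums to 1. For large inputs you would usually subtract the row max before
# np.exp to avoid overflow; the simple version above skips that.
scores = np.array([[1.0, 2.0, 3.0]])
print(softmax(scores))                  # ~ [[0.09003057 0.24472847 0.66524096]]
print(np.sum(softmax(scores), axis=1))  # [1.]
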
# L1 loss is used to evaluate the performance of your model.
# The bigger your loss is, the more different your predictions (yhat)
# are from the true values (y).
# In deep learning, you use optimization algorithms like
# Gradient Descent to train your model and to minimize the cost.
L1 = lambda yhat, y: np.sum(np.abs(y - yhat))

# L2 loss
L2 = lambda yhat, y: np.sum(np.dot(y - yhat, y - yhat))
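
# Illustrative loss values for a tiny, made-up prediction vector:
yhat = np.array([0.9, 0.2, 0.1])
y = np.array([1.0, 0.0, 0.0])
print(L1(yhat, y))  # ~0.4   (= 0.1 + 0.2 + 0.1)
print(L2(yhat, y))  # ~0.06  (= 0.01 + 0.04 + 0.01)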