import numpy as np

# Sigmoid activation and its derivative: sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
sigmoid = lambda x: 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    s = sigmoid(x)
    return s * (1 - s)
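# Quick sanity check (illustrative values, not part of the original paste):
# sigmoid(0) should be 0.5 and sigmoid_derivative(0) should be 0.25.
x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))             # approx [0.269 0.5 0.731]
print(sigmoid_derivative(x))  # approx [0.197 0.25 0.197]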
# Reshaping an image -> vector
def image2vector(image):
    """
    Argument:
    image -- a numpy array of shape (length, height, depth)

    Returns:
    v -- a vector of shape (length*height*depth, 1)
    """
    return image.reshape(image.shape[0] * image.shape[1] * image.shape[2], 1)
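# Illustrative check (shape chosen just for this example, not from the paste):
# a 3x3x2 image should flatten to an (18, 1) column vector.
img = np.arange(18).reshape(3, 3, 2)
print(image2vector(img).shape)  # (18, 1)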
# For a set of images: flatten each image into one column of the result
images2vector = lambda image_set: image_set.reshape(image_set.shape[0], -1).T
# Keep in mind that you can unroll to RGBRGBRGB or RRRGGGBBB.
# It doesn't matter, as long as you're consistent throughout.
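# Illustrative check (batch shape assumed for the example): 10 RGB images of
# size 64x64 become a (64*64*3, 10) matrix, one flattened image per column.
batch = np.random.rand(10, 64, 64, 3)
print(images2vector(batch).shape)  # (12288, 10)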
# Gradient descent converges faster after normalization
normalizeRows = lambda x: x / np.linalg.norm(x, axis=1, keepdims=True)
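# Illustrative check: after row normalization each row has unit L2 norm
# (example matrix made up here for demonstration).
m = np.array([[0.0, 3.0, 4.0],
              [1.0, 6.0, 4.0]])
print(normalizeRows(m))                            # first row -> [0. 0.6 0.8]
print(np.linalg.norm(normalizeRows(m), axis=1))    # both norms ~1.0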
# You can think of softmax as a normalizing function used when your algorithm
# needs to classify two or more classes.
def softmax(x):
    x_exp = np.exp(x)
    return x_exp / np.sum(x_exp, axis=1, keepdims=True)
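# Illustrative check (scores invented for the example): each output row sums to 1.
# Note, not from the original paste: for large inputs np.exp can overflow; a common
# stabilized variant subtracts the per-row max before exponentiating.
scores = np.array([[9.0, 2.0, 5.0, 0.0, 0.0],
                   [7.0, 5.0, 0.0, 0.0, 0.0]])
print(softmax(scores))
print(np.sum(softmax(scores), axis=1))  # [1. 1.]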
# L1 loss is used to evaluate the performance of your model.
# The bigger the loss, the more different your predictions (yhat) are from the true values (y).
# In deep learning, you use optimization algorithms like
# gradient descent to train your model and minimize the cost.
L1 = lambda yhat, y: np.sum(np.abs(y - yhat))

# L2 loss (sum of squared differences)
L2 = lambda yhat, y: np.sum(np.dot(y - yhat, y - yhat))
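# Illustrative check with small hand-picked vectors (values assumed for the example):
yhat = np.array([0.9, 0.2, 0.1, 0.4, 0.9])
y = np.array([1.0, 0.0, 0.0, 1.0, 1.0])
print(L1(yhat, y))  # 1.1
print(L2(yhat, y))  # 0.43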