Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Dependencies.  NOTE(review): make_moons is imported but never used in
# this script — the data below is hard-coded; confirm before removing.
from sklearn.datasets import make_moons
import numpy as np
import math

# Fix the RNG seed so the random weight initialisation (and therefore
# the whole training run) is repeatable.
np.random.seed(1)
def activation(x, derivative=False):
    """Sigmoid activation function, or its derivative.

    Parameters
    ----------
    x : float or np.ndarray
        The pre-activation input.  When ``derivative=True``, ``x`` must
        instead be the *output* of the sigmoid, s = sigmoid(z).
    derivative : bool
        If True, return the sigmoid derivative expressed in terms of the
        sigmoid output: s * (1 - s).

    Returns
    -------
    float or np.ndarray
        sigmoid(x), or its derivative evaluated from the sigmoid output.
    """
    if derivative:
        # d/dz sigmoid(z) = s * (1 - s), where s = sigmoid(z) is passed in as x.
        return x * (1 - x)
    # np.exp works element-wise on arrays and, unlike math.e ** -x on plain
    # floats, does not raise OverflowError for large-magnitude inputs.
    return 1 / (1 + np.exp(-x))
# Toy dataset: four 2-D points; the first two are labelled 1, the last two 0.
X = np.array([[0.7, 0.8], [0.9, 0.7], [0.1, 0.2], [0.2, 0.3]])
y = np.array([[1, 1, 0, 0]]).T

# Learning rate: scales each weight update.  FIX: `step` was previously
# defined but never applied, so updates effectively ran at a rate of 1.0.
step = 0.01

# w0 connects l0 & l1; shape must be (input_data.shape[1], l1.shape[0])
# w1 connects l1 & l2; shape must be (w0.shape[1], 1)
# Weights are initialised uniformly in [-1, 1).
w0 = 2 * np.random.random((X.shape[1], 4)) - 1
w1 = 2 * np.random.random((4, 1)) - 1

# Full-batch training: the entire dataset is passed through on each iteration.
for i in range(10000):
    # Forward pass - compute the output of each layer.
    # NB: for hard classification, l2 > 0.5 -> 1, otherwise -> 0.
    l0 = X
    l1 = activation(np.dot(l0, w0))
    l2 = activation(np.dot(l1, w1))

    # Backward pass - measure the cost and backpropagate through each layer.
    # The error is positive for observations whose output must increase
    # and negative for observations whose output must decrease.
    gradient = y - l2
    l2_err = gradient * activation(l2, derivative=True)
    l1_err = np.dot(l2_err, w1.T) * activation(l1, derivative=True)

    # Update each weight layer by its derivative w.r.t. the inherited error,
    # scaled by the learning rate.  Because the weights all multiply the
    # layer values, dw{i} = l{i}.T @ err{i+1}.
    w1 += step * np.dot(l1.T, l2_err)
    w0 += step * np.dot(l0.T, l1_err)

    # Periodic progress output: mean absolute output error.
    if i % 1000 == 0:
        print(np.mean(np.abs(gradient)))
Add Comment
Please, Sign In to add comment