Advertisement
Not a member of Pastebin yet?
Sign Up — it unlocks many cool features!
def loss(y, output):
    """Squared-error loss between target *y* and prediction *output*."""
    diff = y - output
    return diff * diff
def d_loss(y, output):
    """Derivative of the squared-error loss with respect to *output*."""
    return -2 * (y - output)
class NeuralNetwork:
    """Fully-connected network (two hidden layers) with sigmoid activations,
    trained by backpropagation with classical momentum.

    Relies on module-level ``sig`` / ``d_sig`` (sigmoid and its derivative)
    and ``d_loss`` being defined elsewhere in this file.
    """

    def __init__(self, lr=0.01, beta=0.9, sizes=(784, 126, 32, 10)):
        """Create the network.

        lr    -- learning rate for the momentum update.
        beta  -- momentum coefficient.
        sizes -- layer widths (input, hidden1, hidden2, output); the default
                 reproduces the previously hard-coded 784-126-32-10 layout.
        """
        self.lr = lr
        self.beta = beta
        self.sizes = sizes
        self._init_params()

    def _init_params(self):
        """Draw fresh random weights and zero the momentum buffers."""
        # NOTE(review): np.random.rand yields weights in [0, 1); a
        # zero-centered init usually trains sigmoid nets better -- kept
        # as-is to preserve the original behaviour.
        n_in, n_h1, n_h2, n_out = self.sizes
        self.weights1 = np.random.rand(n_in, n_h1)
        self.weights2 = np.random.rand(n_h1, n_h2)
        self.weights3 = np.random.rand(n_h2, n_out)
        self.vel1 = 0.0
        self.vel2 = 0.0
        self.vel3 = 0.0

    def reinitialize(self):
        """Reset weights and momentum, e.g. between independent training runs.

        Previously duplicated the __init__ weight code verbatim; both now
        share _init_params.
        """
        self._init_params()

    def feedforward(self, x, y):
        """Run a forward pass, caching input, target and layer activations."""
        self.input = x
        self.y = y
        self.layer1 = sig(np.dot(self.input, self.weights1))
        self.layer2 = sig(np.dot(self.layer1, self.weights2))
        self.output = sig(np.dot(self.layer2, self.weights3))

    def backprop(self):
        """One gradient step (with momentum) using the cached forward pass."""
        # Delta at the output layer: dL/d(output) * sigmoid'
        A = d_loss(self.y, self.output) * d_sig(self.output)
        d_weights3 = np.dot(self.layer2.T, A)
        # Propagate the delta back through weights3, then weights2.
        B = np.dot(A, self.weights3.T) * d_sig(self.layer2)
        d_weights2 = np.dot(self.layer1.T, B)
        C = np.dot(B, self.weights2.T) * d_sig(self.layer1)
        d_weights1 = np.dot(self.input.T, C)
        # Momentum update: v <- beta*v + lr*grad ; w <- w - v
        self.vel1 = self.beta * self.vel1 + self.lr * d_weights1
        self.vel2 = self.beta * self.vel2 + self.lr * d_weights2
        self.vel3 = self.beta * self.vel3 + self.lr * d_weights3
        self.weights1 -= self.vel1
        self.weights2 -= self.vel2
        self.weights3 -= self.vel3

    def prediction(self, x):
        """Forward *x* (assumed shape (1, n_in) -- TODO confirm) and return
        the first output unit as a scalar."""
        self.feedforward(x, y=0)
        return self.output[0][0]

    def accuracy_test(self, X_test, Y_test):
        """Return classification accuracy (percent) on a labelled set.

        Bug fix: the original called .argmax() on prediction()'s scalar
        return value (output[0][0]); a numpy scalar's argmax is always 0,
        so every sample was classified as class 0. The argmax must be
        taken over the full output row instead.
        """
        self.correct = 0
        self.total = 0
        for i in range(len(X_test)):
            self.feedforward(np.array([X_test[i]]), y=0)
            if self.output.argmax() == Y_test[i]:
                self.correct += 1
            self.total += 1
        # Guard against an empty test set (ZeroDivisionError in original).
        if self.total == 0:
            return 0.0
        return self.correct / self.total * 100
def ca_fonctionne(X):
    """Return True iff the four predictions in *X* match XOR confidently.

    Expects X = [f(0,0), f(1,0), f(0,1), f(1,1)]: the first and last must
    be below 0.1, the middle two above 0.9.
    """
    lows_ok = X[0] < 0.1 and X[3] < 0.1
    highs_ok = X[1] > 0.9 and X[2] > 0.9
    return lows_ok and highs_ok
- if __name__ == "__main__":
- total = 0
- correct = 0
- X = np.array([[0,0],
- [0,1],
- [1,0],
- [1,1]])
- Y = np.array([[0],[1],[1],[0]])
- taille_ensemble = len(X)
- n_test = 1000
- epoch = 10
- nn = NeuralNetwork()
- for test in range(n_test):
- nn.reinitialize()
- for i in range(epoch):
- x = np.array([X[i%taille_ensemble]])
- y = np.array([Y[i%taille_ensemble]])
- nn.feedforward(x,y)
- nn.backprop()
- t = [nn.prediction([[0,0]]), nn.prediction([[1, 0]]), nn.prediction([[0, 1]]), nn.prediction([[1, 1]])]
- if ca_fonctionne(t):
- correct += 1
- total += 1
- print('----------------------')
- print(correct/total * 100)
- print(ca_fonctionne(t))
- print(nn.prediction([[0, 0]]))
- print(nn.prediction([[1, 0]]))
- print(nn.prediction([[0, 1]]))
- print(nn.prediction([[1, 1]]))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement