Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- '''
- This neural network framework only nudges weights when it performs backpropogation. Also it only works on 1 single network output.
- '''
- import numpy as np
- import random
class Layer:
    """Dense (fully connected) layer: output = inputs . weights + biases.

    forward() stores its result on ``self.output`` instead of returning it,
    matching the other building blocks in this file.
    """

    def __init__(self, inputNodes, outputNodes):
        # Weights start as a scaled standard normal; biases start at zero.
        scale = 0.1
        self.weights = scale * np.random.randn(inputNodes, outputNodes)
        self.biases = np.zeros((1, outputNodes))

    def forward(self, inputs):
        # np.dot (rather than the @ operator) so a bare scalar input,
        # as passed by the training loop below, also works.
        self.output = np.dot(inputs, self.weights) + self.biases
class Activation_ReLU:
    """Rectified linear unit: clamps every negative entry to zero."""

    def forward(self, inputs):
        # Element-wise maximum against 0; stored, not returned,
        # like Layer.forward.
        self.output = np.maximum(inputs, 0)
# Global step size used by backwards() when nudging weights.
learningRate = 0.0001

def backwards(network, input_, desired):
    """One (partial) backpropagation pass over ``network``.

    network : list of layer objects; Activation_ReLU entries are detected
        and skipped (the live caller below passes only Layer instances).
    input_  : the raw network input for this sample. Only referenced from
        the commented-out first-layer debug code, so currently unused.
    desired : the target output value for this sample.

    Only weights are nudged — biases are never updated — and the error
    term assumes a single network output (see the module docstring).
    """
    currentLayer = len(network) - 1
    # Derivative of squared error w.r.t. the final output: 2 * (y - t).
    dError = 2*(network[currentLayer].output - desired)
    # One gradient slot per neuron per layer. Width is hard-coded to 5,
    # so a layer wider than 5 neurons would overflow this buffer.
    gradients = np.zeros((len(network), 5))
    # dError is a 1x1 array here (single output), assigned into slot 0.
    gradients[currentLayer][0] = dError
    currentLayer = len(network) - 1
    while currentLayer >= 0: # Per layer
        print("Current layer: ", currentLayer + 1, "--------------------------------------")
        # Skip activation objects — they have no weights to adjust.
        # NOTE(review): when currentLayer == 0 this tests network[-1]
        # (the LAST layer) via negative indexing — confirm intent.
        if type(network[currentLayer - 1]) == Activation_ReLU:
            pass
        else:
            if currentLayer != 0:
                #Nudge the weights
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    #print("Neuron ", neuronCurrentLayer + 1, ": ")
                    for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])): # Per neuron in previous layer
                        # SGD step: w -= prev_activation * gradient * lr.
                        network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= network[currentLayer - 1].output[0][neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate
                        #print(network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer])
                # Calculate gradients for every neuron in the next layer you're going to adjust
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    #print("Neuron ", neuronCurrentLayer + 1, ": ")
                    for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])): # Per neuron in previous layer
                        # Chain rule: accumulate weight * upstream gradient.
                        gradients[currentLayer - 1][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]
                # Disabled debug branch: as written, first-layer weights
                # are never nudged by the live code path.
                '''
                else: #Special for first layer
                    for outputNodessss in range(len(network[0].output[0])):
                        print("Neuron ", outputNodessss + 1, ": ")
                        for inputNodessss in range(len(input_)):
                            print(network[0].weights[inputNodessss][outputNodessss])
                '''
        # Disabled per-layer debug output.
        '''
        print("Weights: ", network[currentLayer].weights)
        print("Layer output: ", network[currentLayer].output[0])
        print("Gradients: ", gradients[currentLayer])
        '''
        currentLayer -= 1 #Go to previous layer
    # Disabled summary debug output.
    '''
    print("-----------------------------------")
    print("Gradients total: ")
    print(gradients)
    '''
    print("Error: ", (network[len(network) - 1].output[0] - desired))
# Create neural network: 1 -> 3 -> 3 -> 1 dense layers. Each target is
# exactly double its input, so the net is trained to learn doubling.
inputs = [4, 6, 1, 3, 9, 2, 3, 7, 10, 34]
desired = [8, 12, 2, 6, 18, 4, 6, 14, 20, 68]
layer1 = Layer(1, 3)
activation1 = Activation_ReLU()  # NOTE(review): built but never applied below
layer2 = Layer(3, 3)
activation2 = Activation_ReLU()  # NOTE(review): built but never applied below
layer3 = Layer(3, 1)

# Train: 500 epochs over the 10 samples, one backwards() call per sample.
# The forward pass chains the dense layers directly, bypassing the
# activation objects created above.
for epoch in range(500):
    for sample, target in zip(inputs, desired):
        layer1.forward(sample)
        layer2.forward(layer1.output)
        layer3.forward(layer2.output)
        backwards([layer1, layer2, layer3], sample, target)

# Test the trained network on an unseen value.
userInput = 49
layer1.forward(userInput)
layer2.forward(layer1.output)
layer3.forward(layer2.output)
print("Guess: ", layer3.output)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement