'''
This neural network framework nudges both weights and biases in all layers when
it performs backpropagation. It can handle multiple inputs and outputs. Support
for the ReLU activation function in backpropagation is a work in progress (try
it out; it should work).
'''
import numpy as np

class Layer:
    def __init__(self, inputNodes, outputNodes):
        # Small random weights; biases start at 1 (a random bias init is left commented out)
        self.weights = 0.1 * np.random.randn(inputNodes, outputNodes)
        self.biases = 1 + np.zeros((1, outputNodes))
        #self.biases = 0.1 * np.random.randn(1, outputNodes)

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases

class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)
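
# A quick shape check for the two building blocks above (a sketch; the names are
# illustrative and not part of the original framework). Uncomment to run:
#_demoLayer = Layer(2, 3)
#_demoLayer.forward(np.array([[1.0, -2.0]]))  # one sample with two input features
#_demoRelu = Activation_ReLU()
#_demoRelu.forward(_demoLayer.output)         # negative pre-activations clamp to 0
#print(_demoRelu.output.shape)                # expected: (1, 3)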

def trainNetwork(batches, batchSize, epochs):
    #inputData = np.random.randint(1, 1000, batches*batchSize)
    inputData = np.arange(0, batches * batchSize)
    desired = np.copy(inputData) * 2  # target function: y = 2x
    for epochIterator in range(epochs):
        elementIterator = 0
        for batchesIterator in range(batches):
            sum_of_errors_averaged = 0
            for batchSizeIterator in range(batchSize):
                layer1.forward(inputData[elementIterator])
                layer2.forward(layer1.output)
                sum_of_errors_averaged += (layer2.output - desired[elementIterator])
                elementIterator += 1
            sum_of_errors_averaged /= batchSize
            # Pass the most recent input so the first layer can be nudged too,
            # mirroring how the layer outputs from the last forward pass are used
            backwards([layer1, layer2], sum_of_errors_averaged, inputData[elementIterator - 1])
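
# The inner batch loop above can also be computed in one vectorized forward pass,
# since np.dot handles a whole (batchSize, 1) column of inputs at once (a sketch
# under that assumption; `start` would mark the first element of the batch).
# Note that backwards() reads the layer outputs left behind by the last scalar
# forward pass, so this sketch only replaces the error computation:
#batch = inputData[start:start + batchSize].reshape(-1, 1)
#layer1.forward(batch)
#layer2.forward(layer1.output)
#sum_of_errors_averaged = np.mean(layer2.output - desired[start:start + batchSize].reshape(-1, 1), axis=0, keepdims=True)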

learningRate = 1e-9  # global step size used by backwards()

def backwards(network, error, input_):
    currentLayer = len(network) - 1
    #dError = 2*(network[currentLayer].output[0] - desired)
    dError = error
    input_ = np.atleast_1d(input_)  # lets the first-layer update iterate over input features even for a scalar input
    gradients = np.zeros((len(network), 10))  # 10 is the maximum number of neurons per layer
    # This presumes the last layer is a normal one and not an activation
    for neuronsPerLastLayer in range(len(network[currentLayer].output[0])):
        gradients[currentLayer][neuronsPerLastLayer] = dError[0][neuronsPerLastLayer]  # index columns of the (1, n) error row
    # Start backpropagation through the rest of the layers
    while currentLayer >= 0:  # Per layer, down to the first one, which is connected to the network input
        if currentLayer == 0:
            # Nudge the weights and biases in the first layer using the raw network input
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])):  # Per neuron in current layer
                network[currentLayer].biases[0][neuronCurrentLayer] -= gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(input_)):  # Per input feature
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= input_[neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate
            currentLayer -= 1
        else:
            # Nudge the weights and biases
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])):  # Per neuron in current layer
                network[currentLayer].biases[0][neuronCurrentLayer] -= gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])):  # Per neuron in previous layer / per weight per neuron in current layer
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= network[currentLayer - 1].output[0][neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate
            # Calculate gradients for every neuron in the next layer to adjust
            if type(network[currentLayer - 1]) == Activation_ReLU:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])):  # Per neuron in current layer
                    for neuronPreviousLayer in range(len(network[currentLayer - 2].output[0])):  # Per neuron in previous normal layer (skips the activation layer)
                        if network[currentLayer - 2].output[0][neuronPreviousLayer] > 0:
                            gradients[currentLayer - 2][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]
                        else:
                            gradients[currentLayer - 2][neuronPreviousLayer] = 0
                currentLayer -= 2
            else:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])):  # Per neuron in current layer
                    for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])):  # Per neuron in previous layer
                        gradients[currentLayer - 1][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]
                currentLayer -= 1
    #print("Error: ", (network[len(network) - 1].output[0] - desired))
    #error = network[len(network) - 1].output[0] - desired
    #print("Gradients total: \n", gradients)

#Create the neural network
layer1 = Layer(1, 1)
layer1.biases = 1 / layer1.biases    # biases stay at 1
layer1.weights = 1 / layer1.weights  # reciprocal of small random weights, so layer1 starts with large weights
layer2 = Layer(1, 1)
activation2 = Activation_ReLU()  # instantiated for experiments; not used in the forward passes below

#Train the network
trainNetwork(100, 32, 1)

testInput = 9817264
layer1.forward(testInput)
layer2.forward(layer1.output)
print("Guess: ", layer2.output)
print("Error: ", layer2.output - testInput * 2)