Latest Creation by Felanpro (Dec 12th, 2022)
Python | Source Code
'''
This neural network framework nudges both weights and biases when it performs
backpropagation (the update for the very first layer is currently commented out
inside backwards()). It can handle multiple inputs and outputs. Support for the
ReLU activation function in backpropagation is still being worked on (try it
out; it should work).
'''

import numpy as np
import random


class Layer:
    def __init__(self, inputNodes, outputNodes):
        # Small random weights, shape (inputNodes, outputNodes); zero biases, shape (1, outputNodes)
        self.weights = 0.1 * np.random.randn(inputNodes, outputNodes)
        self.biases = np.zeros((1, outputNodes))
        #self.biases = 0.1 * np.random.randn(1, outputNodes)

    def forward(self, inputs):
        # Fully connected forward pass: output = inputs . weights + biases
        self.output = np.dot(inputs, self.weights) + self.biases


class Activation_ReLU:
    def forward(self, inputs):
        # Element-wise ReLU: negative values are clamped to zero
        self.output = np.maximum(0, inputs)

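# A quick, illustrative sanity check of the forward pass. The layer sizes and
# input values below are arbitrary assumptions for demonstration only and are
# not part of the training setup further down.
_demo_layer = Layer(2, 4)
_demo_relu = Activation_ReLU()
_demo_layer.forward(np.array([[1.0, -2.0]]))  # produces a (1, 4) output
_demo_relu.forward(_demo_layer.output)        # negative entries become 0
#print(_demo_relu.output)
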
def trainNetwork(batches, batchSize, epochs):
    elementIterator = 0
    # Random training inputs; the target function is simply y = 2 * x
    inputData = np.random.randint(1, 1000000, batches*batchSize)
    desired = np.copy(inputData) * 2

    for epochIterator in range(epochs):
        elementIterator = 0
        for batchesIterator in range(batches):
            sum_of_errors_averaged = 0
            for batchSizeIterator in range(batchSize):
                # Forward pass through the four linear layers (the ReLU
                # activations created further down are not wired into this pass)
                layer1.forward(inputData[elementIterator])
                layer2.forward(layer1.output)
                layer3.forward(layer2.output)
                layer4.forward(layer3.output)
                sum_of_errors_averaged += (layer4.output - desired[elementIterator])

                elementIterator += 1

            # Average the signed error over the batch and backpropagate it
            sum_of_errors_averaged /= batchSize
            backwards([layer1, layer2, layer3, layer4], [inputData[elementIterator - 1]], sum_of_errors_averaged)

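# A minimal sketch of the quantity trainNetwork() hands to backwards(): the
# batch-averaged signed error. With a squared-error loss L = mean((prediction - target)**2),
# the exact output-layer gradient would carry a factor of 2 (see the commented-out
# dError line in backwards()); this framework drops that factor. The helper name
# below is hypothetical and only restates the computation for clarity.
def _averaged_output_error(predictions, targets):
    predictions = np.asarray(predictions, dtype=float)
    targets = np.asarray(targets, dtype=float)
    return np.mean(predictions - targets)  # same value as sum_of_errors_averaged per batch
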
learningRate = 0.00000001

def backwards(network, input_, error):
    currentLayer = len(network) - 1

    #dError = 2*(network[currentLayer].output[0] - desired)
    dError = error  # shape (1, number of output neurons)

    gradients = np.zeros((len(network), 10)) # The second dimension is the maximum number of neurons per layer

    # This presumes the last layer is a normal one and not an activation
    for neuronsPerLastLayer in range(len(network[currentLayer].output[0])):
        gradients[currentLayer][neuronsPerLastLayer] = dError[0][neuronsPerLastLayer]

    # Start backpropagation for the rest of the layers
    while currentLayer > 0: # Per layer, except the first one that is connected to the network input
        if currentLayer == 0:
            # Unreachable: the loop condition stops before the first layer is
            # reached, so its weights and biases are never nudged. The disabled
            # update is kept below for reference.
            '''
            # Nudge the weights and biases in the first layer
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                network[currentLayer].biases[0][neuronCurrentLayer] -= 1 * gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(input_)): # Per network input
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= input_[neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate

            currentLayer -= 1
            '''
            pass
        else:
            # Nudge the weights and biases of the current layer
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                network[currentLayer].biases[0][neuronCurrentLayer] -= 1 * gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])): # Per neuron in previous layer/per weight per neuron in current layer
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= network[currentLayer - 1].output[0][neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate

            # Calculate gradients for every neuron in the next layer you're going to adjust
            # (each previous-layer gradient accumulates weight * downstream gradient)
            if type(network[currentLayer - 1]) == Activation_ReLU:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    for neuronPreviousLayer in range(len(network[currentLayer - 2].output[0])): # Per neuron in previous normal layer (skips the activation layer)
                        if network[currentLayer - 2].output[0][neuronPreviousLayer] > 0:
                            gradients[currentLayer - 2][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]
                        else:
                            # ReLU derivative is zero for non-positive inputs
                            gradients[currentLayer - 2][neuronPreviousLayer] = 0

                currentLayer -= 2
            else:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])): # Per neuron in previous layer
                        gradients[currentLayer - 1][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]

                currentLayer -= 1

    #print("Error: ", (network[len(network) - 1].output[0] - desired))
    #error = network[len(network) - 1].output[0] - desired
    #print("Gradients total: \n", gradients)

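# A minimal vectorized sketch of what the nested loops in backwards() compute
# for one linear layer, assuming a row-vector layer input of shape (1, n_in) and
# an upstream gradient of shape (1, n_out). The function name and signature are
# hypothetical and are not called anywhere in this file; they only restate the
# update rule dW = input^T . upstream, db = upstream, and the gradient passed
# back = upstream . W^T. (The loops above nudge the weights before propagating
# the gradient, so they propagate through the already-updated weights.)
def _linear_backward_sketch(layer, layer_input, upstream_gradient, lr=learningRate):
    layer_input = np.atleast_2d(layer_input).astype(float)   # (1, n_in)
    upstream_gradient = np.atleast_2d(upstream_gradient)     # (1, n_out)
    gradient_for_previous_layer = np.dot(upstream_gradient, layer.weights.T)  # (1, n_in)
    layer.weights -= np.dot(layer_input.T, upstream_gradient) * lr            # (n_in, n_out)
    layer.biases -= upstream_gradient * lr                                    # (1, n_out)
    return gradient_for_previous_layer
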
# Create the neural network
layer1 = Layer(1, 3)

layer2 = Layer(3, 3)
activation2 = Activation_ReLU()

layer3 = Layer(3, 3)
activation3 = Activation_ReLU()

layer4 = Layer(3, 1)
# Note: activation2 and activation3 are instantiated but never used by
# trainNetwork(), so the trained network is purely linear.


# Train the network
trainNetwork(10, 3, 1)

# The training data follows y = 2 * x, so the ideal output for testInput = 333 is 666
testInput = 333
layer1.forward(testInput)
layer2.forward(layer1.output)
layer3.forward(layer2.output)
layer4.forward(layer3.output)
print(layer4.output)
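
# A small optional check of how close the trained network gets to the y = 2 * x
# target on a few extra inputs. The inputs chosen here are arbitrary assumptions
# for illustration and are not part of the original test.
for _x in (10, 500, 250000):
    layer1.forward(_x)
    layer2.forward(layer1.output)
    layer3.forward(layer2.output)
    layer4.forward(layer3.output)
    print(_x, "->", layer4.output[0][0], "(target:", 2 * _x, ")")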
  118.  
  119.  
  120.  
  121.  
  122.  
  123.  
  124.  
  125.  
  126.  
  127.  
  128.  
  129.  