Felanpro

NeuralNetworkFrameworkBetaVersion2

Dec 9th, 2022

'''
This neural network framework nudges both the weights and the biases when it
performs backpropagation. It only works with a single network output.
'''
import numpy as np

class Layer:
    def __init__(self, inputNodes, outputNodes):
        # Small random weights; biases start at 1
        self.weights = 0.1 * np.random.randn(inputNodes, outputNodes)
        self.biases = 1 + np.zeros((1, outputNodes))

    def forward(self, inputs):
        # Weighted sum of the inputs plus the bias, shape (1, outputNodes)
        self.output = np.dot(inputs, self.weights) + self.biases

class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)
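
# A minimal forward-pass sketch (illustrative only; the 2-in/3-out layer here
# is an assumption for the demo, separate from the network built further down):
#
#   layer = Layer(2, 3)
#   layer.forward(np.array([[1.0, 2.0]]))  # layer.output has shape (1, 3)
#   relu = Activation_ReLU()
#   relu.forward(layer.output)             # relu.output clips negatives to 0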

learningRate = 0.0001

def backwards(network, input_, desired):
    lastLayer = len(network) - 1
    # Derivative of the squared error with respect to the single network output
    dError = 2 * (network[lastLayer].output[0][0] - desired)

    # One gradient slot per neuron; size the table by the widest layer instead
    # of a hardcoded constant
    widestLayer = max(layer.output.shape[1] for layer in network)
    gradients = np.zeros((len(network), widestLayer))
    gradients[lastLayer][0] = dError

    currentLayer = lastLayer
    while currentLayer >= 0: # Per layer, from output back to input
        # Skip layers fed by an activation (currentLayer > 0 also guards
        # against network[-1] wrapping around to the last layer)
        if currentLayer > 0 and isinstance(network[currentLayer - 1], Activation_ReLU):
            pass
        else:
            # Inputs that fed this layer: the previous layer's output, or the
            # raw network input for the first layer
            if currentLayer != 0:
                previousOutput = network[currentLayer - 1].output[0]
            else:
                previousOutput = np.atleast_1d(input_)

            # Nudge the weights and biases
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                network[currentLayer].biases[0][neuronCurrentLayer] -= gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(previousOutput)): # Per input to the current layer
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= previousOutput[neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate

            # Accumulate gradients for every neuron in the previous layer,
            # which is adjusted next
            if currentLayer != 0:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    for neuronPreviousLayer in range(len(previousOutput)): # Per neuron in previous layer
                        gradients[currentLayer - 1][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]

        currentLayer -= 1 # Go to the previous layer

    print("Error: ", network[lastLayer].output[0][0] - desired)
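
# The updates above follow from the chain rule for the squared error
# E = (output - desired)^2 that this framework minimizes:
#   dE/d(output) = 2 * (output - desired)    -> the dError seed
#   dE/d(bias)   = delta                     -> the bias feeds the neuron directly
#   dE/d(weight) = previous_output * delta   -> the weight scales that input
# where delta is the per-neuron slot accumulated in `gradients`.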

# Create training data: the active targets follow y = 2x + 3
#inputs = [4, 6, 1, 3, 9, 2, 3, 7, 10, 34]
#desired = [8, 12, 2, 6, 18, 4, 6, 14, 20, 68]
inputs = [3, 7, 4, 9, 1, 3, 3, 8, 5, 2]
desired = [6 + 3, 14 + 3, 8 + 3, 18 + 3, 2 + 3, 6 + 3, 6 + 3, 16 + 3, 10 + 3, 4 + 3]

# Create the neural network: 1 input -> two hidden layers of 3 -> 1 output
layer1 = Layer(1, 3)
layer2 = Layer(3, 3)
layer3 = Layer(3, 1)

# Train the network: 5000 epochs over the 10 training samples
for x in range(5000):
    for iteration in range(10):
        layer1.forward(inputs[iteration])
        layer2.forward(layer1.output)
        layer3.forward(layer2.output)
        backwards([layer1, layer2, layer3], inputs[iteration], desired[iteration])

# Test the network
userInput = 333
layer1.forward(userInput)
layer2.forward(layer1.output)
layer3.forward(layer2.output)
print("Guess: ", layer3.output)
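
# Optional sanity check (an addition, not part of the original paste): the
# training targets follow y = 2x + 3, so the guess should land near
# 2*333 + 3 = 669; the network is purely linear, so it should extrapolate
# beyond the 1-9 training inputs.
print("Expected (2x + 3): ", 2 * userInput + 3)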