In Progress 2
Felanpro, Dec 11th, 2022

'''
This neural network framework nudges both weights and biases when it performs
backpropagation, and it can handle multiple outputs. Support for the ReLU
activation function during backpropagation is still a work in progress.
'''

import numpy as np
import random


class Layer:
    def __init__(self, inputNodes, outputNodes):
        # Small random weights; biases all start at 1
        self.weights = 0.1 * np.random.randn(inputNodes, outputNodes)
        self.biases = 1 + np.zeros((1, outputNodes))

    def forward(self, inputs):
        # Row vector of shape (1, outputNodes)
        self.output = np.dot(inputs, self.weights) + self.biases


class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)
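
# A quick, optional sanity check of how the two classes above compose; the
# demo_layer/demo_relu names are illustrative only and are not used elsewhere.
# A Layer turns its input into a (1, outputNodes) row vector, and
# Activation_ReLU clips the negative entries of that vector to zero.
demo_layer = Layer(1, 3)
demo_relu = Activation_ReLU()
demo_layer.forward(5)
demo_relu.forward(demo_layer.output)
assert demo_relu.output.shape == (1, 3)
assert (demo_relu.output >= 0).all()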


learningRate = 0.0000000001


def backwards(network, input_, desired):
    currentLayer = len(network) - 1

    # Derivative of the squared error with respect to the last layer's output
    dError = 2 * (network[currentLayer].output[0] - desired)

    gradients = np.zeros((len(network), 10))  # The number here is the maximum number of neurons per layer

    # This assumes the last layer is a normal one and not an activation
    for neuronsPerLastLayer in range(len(network[currentLayer].output[0])):
        gradients[currentLayer][neuronsPerLastLayer] = dError[neuronsPerLastLayer]

    # Backpropagate through the remaining layers
    while currentLayer > 0:  # Every layer except the first one, which takes the network input

        # Nudge the weights and biases of the current layer
        for neuronCurrentLayer in range(len(network[currentLayer].output[0])):  # Per neuron in current layer
            network[currentLayer].biases[0][neuronCurrentLayer] -= 1 * gradients[currentLayer][neuronCurrentLayer] * learningRate
            for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])):  # Per neuron in previous layer/per weight per neuron in current layer
                network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= network[currentLayer - 1].output[0][neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate

        # Calculate gradients for every neuron in the next layer you're going to adjust
        if type(network[currentLayer - 1]) == Activation_ReLU:
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])):  # Per neuron in current layer
                for neuronPreviousLayer in range(len(network[currentLayer - 2].output[0])):  # Per neuron in previous normal layer (skips the activation layer)
                    if network[currentLayer - 2].output[0][neuronPreviousLayer] > 0:
                        gradients[currentLayer - 2][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]
                    else:
                        # The ReLU derivative is 0 for inputs that were clipped to 0
                        gradients[currentLayer - 2][neuronPreviousLayer] = 0

            currentLayer -= 2
        else:
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])):  # Per neuron in current layer
                for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])):  # Per neuron in previous layer
                    gradients[currentLayer - 1][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]

            currentLayer -= 1

    # print("Error: ", (network[len(network) - 1].output[0] - desired))
    # error = network[len(network) - 1].output[0] - desired
    # print("Gradients total: \n", gradients)


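# A minimal, optional sketch of how backwards() is meant to be used: run a
# forward pass through a stack of layers first (so every layer has an .output),
# then pass that same list of layers to backwards() along with the input and
# the desired output. The tiny_* names are illustrative only; the block is kept
# inside a string, like the other inactive examples in this file.
'''
tiny_hidden = Layer(1, 3)
tiny_out = Layer(3, 1)
tiny_hidden.forward(4)
tiny_out.forward(tiny_hidden.output)
backwards([tiny_hidden, tiny_out], 4, 8)  # one nudge toward the target y = 2x
'''
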
# Create training data
# inputs = [3, 6, 2, 8, 12, 90, 45, 23, 88, 18]
# desired = np.array([[6, 6], [12, 12], [4, 4], [16, 16], [24, 24], [180, 180], [90, 90], [46, 46], [176, 176], [36, 36]])
# inputs = [4, 6, 1, 3, 9, 2, 3, 7, 10, 34]
# desired = [8, 12, 2, 6, 18, 4, 6, 14, 20, 68]

inputs = []
desired = []

for y in range(1000):
    inputs.append(y + 1)

random.shuffle(inputs)

for y in range(1000):
    desired.append(inputs[y] * 2)


# Create neural network
layer1 = Layer(1, 3)
'''
layer1's weights aren't going to be affected by the backwards function, so just set
the weights to 1 if you want, like this:
layer1.weights *= 1/layer1.weights
'''
layer1.weights *= 1/layer1.weights
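# An equivalent way to get all-ones weights, assuming that is the intent of the
# line above; it avoids a division by zero in the unlikely case that a randomly
# initialised weight is exactly 0.0:
# layer1.weights = np.ones_like(layer1.weights)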

layer2 = Layer(3, 1)
activation2 = Activation_ReLU()

# layer3 and layer4 are only used by the commented-out "with activations" path
# below. Note that for that path to work, layer2 would have to be Layer(3, 3),
# since layer3 expects 3 input values.
layer3 = Layer(3, 3)
activation3 = Activation_ReLU()

layer4 = Layer(3, 1)

# Train the network
for epoch in range(100):  # 100 passes over the shuffled training data
    for x in range(len(inputs)):

        # With activations
        '''
        layer1.forward(inputs[x])
        layer2.forward(layer1.output)
        activation2.forward(layer2.output)
        layer3.forward(activation2.output)
        activation3.forward(layer3.output)
        layer4.forward(activation3.output)

        backwards([layer1, layer2, activation2, layer3, activation3, layer4], inputs[x], desired[x])
        '''

        # Without activations
        layer1.forward(inputs[x])
        layer2.forward(layer1.output)
        '''
        layer3.forward(layer2.output)
        layer4.forward(layer3.output)
        '''

        backwards([layer1, layer2], inputs[x], desired[x])


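# One way to watch training progress (a sketch only; the names mirror the loop
# above and the block is kept inside a string like the other inactive examples):
# accumulate the absolute error over an epoch and print its mean.
'''
for epoch in range(100):
    totalError = 0
    for x in range(len(inputs)):
        layer1.forward(inputs[x])
        layer2.forward(layer1.output)
        totalError += abs(layer2.output[0][0] - desired[x])
        backwards([layer1, layer2], inputs[x], desired[x])
    print("Epoch", epoch, "mean abs error:", totalError / len(inputs))
'''
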
# Test the network
testInput = 24

# With activations
'''
layer1.forward(testInput)
layer2.forward(layer1.output)
activation2.forward(layer2.output)
layer3.forward(activation2.output)
activation3.forward(layer3.output)
layer4.forward(activation3.output)
#backwards([layer1, layer2, activation2, layer3, activation3, layer4], testInput, 48)
'''

# Without activations
layer1.forward(testInput)
layer2.forward(layer1.output)
'''
layer3.forward(layer2.output)
layer4.forward(layer3.output)
'''
#backwards([layer1, layer2, activation2, layer3, activation3, layer4], testInput, 48)


print("Guess: ", layer2.output)
print("Error: ", layer2.output - (testInput * 2))
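
# An optional extra check (extraInput is just an illustrative name): sweep a few
# more inputs through the trained two-layer stack and compare each guess against
# the y = 2x target.
for extraInput in [5, 50, 500]:
    layer1.forward(extraInput)
    layer2.forward(layer1.output)
    print("Input:", extraInput, "Guess:", layer2.output[0][0], "Target:", extraInput * 2)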