Felanpro

Neural Network latest

Dec 14th, 2022 (edited)
'''
This neural network framework nudges both the weights and the biases of every layer
when it performs backpropagation. It can handle multiple inputs and outputs. Support
for the ReLU activation function during backpropagation is still being worked on
(try it out, it should work).
'''

import numpy as np
import random

class Layer:
    def __init__(self, inputNodes, outputNodes):
        # Small random weights and zero biases
        self.weights = 0.1 * np.random.randn(inputNodes, outputNodes)
        self.biases = np.zeros((1, outputNodes))
        #self.biases = 0.1 * np.random.randn(1, outputNodes)

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases

class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)

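# A minimal forward-pass sketch: it only illustrates how a Layer and an Activation_ReLU
# chain together. The layer sizes and the sample input are made-up values for
# demonstration and are not part of the training setup below.
demo_layer = Layer(2, 3)                      # 2 inputs -> 3 neurons
demo_activation = Activation_ReLU()
demo_layer.forward(np.array([[1.0, -2.0]]))   # one sample with 2 features
demo_activation.forward(demo_layer.output)    # negative outputs are clamped to 0
assert demo_activation.output.shape == (1, 3)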

def trainNetwork(batches, batchSize, epochs):
    elementIterator = 0
    #inputData = np.random.randint(1, 1000, batches*batchSize)
    inputData = np.arange(0, batches*batchSize) + 1
    desired = np.copy(inputData) * 2    # target: the network should double its input

    for epochIterator in range(epochs):
        elementIterator = 0
        for batchesIterator in range(batches):
            sum_of_errors_averaged = 0
            for batchSizeIterator in range(batchSize):
                layer1.forward(inputData[elementIterator])
                layer2.forward(layer1.output)

                sum_of_errors_averaged += (layer2.output - desired[elementIterator])
                elementIterator += 1

            # Average the error over the batch and backpropagate it
            sum_of_errors_averaged /= batchSize
            backwards([layer1, layer2], sum_of_errors_averaged)

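# Note on the batch handling above: the raw (output - target) errors are summed over
# the batch and divided by batchSize, so backwards() receives the batch-averaged error.
# Multiplying that average by 2 inside backwards() matches the derivative of a
# squared-error loss averaged over the batch.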

learningRate = 0.0000001

def backwards(network, error):
    currentLayer = len(network) - 1

    # Derivative of the squared error with respect to the network output
    #dError = 2*(network[currentLayer].output[0] - desired)
    dError = 2 * error

    gradients = np.zeros((len(network), 10)) # The second dimension is the maximum number of neurons per layer

    # This presumes the last layer is a normal one and not an activation
    for neuronsPerLastLayer in range(len(network[currentLayer].output[0])):
        gradients[currentLayer][neuronsPerLastLayer] = dError[0][neuronsPerLastLayer]

    # Backpropagate through the remaining layers
    while currentLayer > 0: # Per layer, except the first one, which is connected to the network input
        if currentLayer == 0: # Never reached while the loop condition is currentLayer > 0; the first-layer update stays disabled
            '''
            # Nudge the weights and biases in the first layer
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                network[currentLayer].biases[0][neuronCurrentLayer] -= 1 * gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(input_)): # Per neuron in previous layer
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= input_[neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate

            currentLayer -= 1
            '''
            pass
        else:
            # Nudge the weights and biases
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                #network[currentLayer].biases[0][neuronCurrentLayer] -= 1 * gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])): # Per neuron in previous layer/per weight per neuron in current layer
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= network[currentLayer - 1].output[0][neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate

            # Calculate gradients for every neuron in the next layer to be adjusted
            if type(network[currentLayer - 1]) == Activation_ReLU:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    for neuronPreviousLayer in range(len(network[currentLayer - 2].output[0])): # Per neuron in previous normal layer (skips the activation layer)
                        if network[currentLayer - 2].output[0][neuronPreviousLayer] > 0:
                            gradients[currentLayer - 2][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]
                        else:
                            gradients[currentLayer - 2][neuronPreviousLayer] = 0

                currentLayer -= 2
            else:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])): # Per neuron in previous layer
                        gradients[currentLayer - 1][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]

                currentLayer -= 1

    #print("Error: ", (network[len(network) - 1].output[0] - desired))
    #error = network[len(network) - 1].output[0] - desired
    #print("Gradients total: \n", gradients)

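# Worked math behind backwards(), as a sketch: for a layer computing y = x . W + b with
# squared error L = (y - t)^2, the chain rule gives
#     dL/dW[i][j] = x[i] * 2*(y[j] - t[j])
#     dL/dx[i]    = sum over j of W[i][j] * 2*(y[j] - t[j])
# The nested loops above apply the dL/dW rule element by element, while gradients[layer][j]
# carries the 2*(y[j] - t[j]) term backwards through the weights (masked by the ReLU
# condition when an activation layer sits in between).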

# Create the neural network
layer1 = Layer(1, 1)
#layer1.biases = 1/layer1.biases
layer1.weights = 1      # override the random initialisation with a fixed scalar weight
layer1.biases *= 0

layer2 = Layer(1, 1)
activation2 = Activation_ReLU()

print("Layer 1 weights: ", layer1.weights)
print("Layer 1 biases: ", layer1.biases)
print("Layer 2 weights: ", layer2.weights)
print("Layer 2 biases: ", layer2.biases)

trainNetwork(100, 32, 100)

print("Layer 1 weights: ", layer1.weights)
print("Layer 1 biases: ", layer1.biases)
print("Layer 2 weights: ", layer2.weights)
print("Layer 2 biases: ", layer2.biases)

# Test the network
testInput = 9999999999
layer1.forward(testInput)
layer2.forward(layer1.output)

print("Guess: ", layer2.output)
print("Error: ", layer2.output - testInput * 2)


--------------------------

'''
This neural network framework nudges both the weights and the biases of every layer
when it performs backpropagation. It can handle multiple inputs and outputs. Support
for the ReLU activation function during backpropagation is still being worked on
(try it out, it should work).
'''

import numpy as np
import random

class Layer:
    def __init__(self, inputNodes, outputNodes):
        # Small random weights and zero biases
        self.weights = 0.1 * np.random.randn(inputNodes, outputNodes)
        self.biases = np.zeros((1, outputNodes))
        #self.biases = 0.1 * np.random.randn(1, outputNodes)

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases

class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)

def trainNetwork(batches, batchSize, epochs):
    elementIterator = 0
    #inputData = np.random.randint(1, 1000, batches*batchSize)
    inputData = np.arange(0, batches*batchSize) + 1
    desired = np.copy(inputData) * 2    # target: the network should double its input

    for epochIterator in range(epochs):
        elementIterator = 0
        for batchesIterator in range(batches):
            sum_of_errors_averaged = 0
            for batchSizeIterator in range(batchSize):
                layer1.forward(inputData[elementIterator])
                layer2.forward(layer1.output)

                sum_of_errors_averaged += (layer2.output - desired[elementIterator])
                elementIterator += 1

            # Average the error over the batch and backpropagate it
            sum_of_errors_averaged /= batchSize
            backwards([layer1, layer2], sum_of_errors_averaged)

learningRate = 0.0000001

def backwards(network, error):
    currentLayer = len(network) - 1

    # Derivative of the squared error with respect to the network output
    #dError = 2*(network[currentLayer].output[0] - desired)
    dError = 2 * error

    gradients = np.zeros((len(network), 10)) # The second dimension is the maximum number of neurons per layer

    # This presumes the last layer is a normal one and not an activation
    for neuronsPerLastLayer in range(len(network[currentLayer].output[0])):
        gradients[currentLayer][neuronsPerLastLayer] = dError[0][neuronsPerLastLayer]

    # Backpropagate through the remaining layers
    while currentLayer > 0: # Per layer, except the first one, which is connected to the network input
        if currentLayer == 0: # Never reached while the loop condition is currentLayer > 0; the first-layer update stays disabled
            '''
            # Nudge the weights and biases in the first layer
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                network[currentLayer].biases[0][neuronCurrentLayer] -= 1 * gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(input_)): # Per neuron in previous layer
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= input_[neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate

            currentLayer -= 1
            '''
            pass
        else:
            # Nudge the weights and biases
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                #network[currentLayer].biases[0][neuronCurrentLayer] -= 1 * gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])): # Per neuron in previous layer/per weight per neuron in current layer
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= network[currentLayer - 1].output[0][neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate

            # Calculate gradients for every neuron in the next layer to be adjusted
            if type(network[currentLayer - 1]) == Activation_ReLU:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    for neuronPreviousLayer in range(len(network[currentLayer - 2].output[0])): # Per neuron in previous normal layer (skips the activation layer)
                        if network[currentLayer - 2].output[0][neuronPreviousLayer] > 0:
                            gradients[currentLayer - 2][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]
                        else:
                            gradients[currentLayer - 2][neuronPreviousLayer] = 0

                currentLayer -= 2
            else:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])): # Per neuron in previous layer
                        gradients[currentLayer - 1][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]

                currentLayer -= 1

    #print("Error: ", (network[len(network) - 1].output[0] - desired))
    #error = network[len(network) - 1].output[0] - desired
    #print("Gradients total: \n", gradients)

# Create the neural network
layer1 = Layer(2, 2)
#layer1.biases = 1/layer1.biases
layer1.weights = 1      # override the random initialisation with a fixed scalar weight
layer1.biases *= 0

layer2 = Layer(2, 2)
activation2 = Activation_ReLU()

print("Layer 1 weights: ", layer1.weights)
print("Layer 1 biases: ", layer1.biases)
print("Layer 2 weights: ", layer2.weights)
print("Layer 2 biases: ", layer2.biases)

trainNetwork(100, 32, 10)

print("Layer 1 weights: ", layer1.weights)
print("Layer 1 biases: ", layer1.biases)
print("Layer 2 weights: ", layer2.weights)
print("Layer 2 biases: ", layer2.biases)

# Test the network
testInput = np.array([333, 333])
layer1.forward(testInput)
layer2.forward(layer1.output)

print("Guess: ", layer2.output)
print("Error: ", layer2.output - (testInput * 2))