Felanpro

Dec 12th, 2022
'''
This neural network framework nudges both weights and biases in all layers when it performs backpropagation. It can handle multiple outputs and inputs. Support for the ReLU activation function in backpropagation is still being worked on (try it out; it should work).
'''

import numpy as np
import random

class Layer:
    def __init__(self, inputNodes, outputNodes):
        self.weights = 0.1 * np.random.randn(inputNodes, outputNodes)
        self.biases = 1 + np.zeros((1, outputNodes))
        #self.biases = 0.1 * np.random.randn(1, outputNodes)

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases


class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)

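# A minimal shape check (illustrative sketch, not part of the framework itself):
# Layer(3, 2) maps a (1, 3) input row to a (1, 2) output row, and Activation_ReLU
# keeps that shape while clipping negatives to zero. The example* names below are
# placeholders, not identifiers used elsewhere in this file.
exampleLayer = Layer(3, 2)
exampleActivation = Activation_ReLU()
exampleLayer.forward(np.array([[1.0, -2.0, 3.0]]))
exampleActivation.forward(exampleLayer.output)
assert exampleActivation.output.shape == (1, 2)
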
def trainNetwork(batches, batchSize, epochs):
    elementIterator = 0
    #inputData = np.random.randint(1, 1000, batches*batchSize)
    inputData = np.arange(0, batches*batchSize)
    desired = np.copy(inputData) * 2

    for epochIterator in range(epochs):
        elementIterator = 0
        for batchesIterator in range(batches):
            sum_of_errors_averaged = 0
            for batchSizeIterator in range(batchSize):
                layer1.forward(inputData[elementIterator])
                layer2.forward(layer1.output)

                sum_of_errors_averaged += (layer2.output - desired[elementIterator])
                elementIterator += 1

            sum_of_errors_averaged /= batchSize
            backwards([layer1, layer2], sum_of_errors_averaged)

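# Worked example of what one call below amounts to (illustrative only):
# trainNetwork(100, 32, 1) sweeps once over the inputs 0..3199 with targets 0..6398,
# i.e. it fits the mapping y = 2x in 100 mini-batches of 32 samples. The value handed
# to backwards() for each batch is the batch-averaged residual
#     (1/32) * sum over the batch of (layer2(layer1(x)) - 2*x),
# which keeps the (1, 1) shape of a single layer2 output.
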
learningRate = 0.000000001

def backwards(network, error):
    currentLayer = len(network) - 1

    #dError = 2*(network[currentLayer].output[0] - desired)
    dError = error[0]  # flatten the (1, n) batch-averaged error to one value per output neuron

    gradients = np.zeros((len(network), 10)) # The digit here represents the maximum number of neurons per layer

    # This presumes the last layer is a normal one and not an activation
    for neuronsPerLastLayer in range(len(network[currentLayer].output[0])):
        gradients[currentLayer][neuronsPerLastLayer] = dError[neuronsPerLastLayer]

    # Start backpropagation for the rest of the layers
    while currentLayer > 0: # Per layer except the first one, which is connected to the network input
        if currentLayer == 0:
            '''
            # Nudge the weights and biases in the first layer
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                network[currentLayer].biases[0][neuronCurrentLayer] -= 1 * gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(input_)): # Per neuron in previous layer
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= input_[neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate

            currentLayer -= 1
            '''
            pass
        else:
            # Nudge the weights and biases
            for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                network[currentLayer].biases[0][neuronCurrentLayer] -= 1 * gradients[currentLayer][neuronCurrentLayer] * learningRate
                for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])): # Per neuron in previous layer/per weight per neuron in current layer
                    network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] -= network[currentLayer - 1].output[0][neuronPreviousLayer] * gradients[currentLayer][neuronCurrentLayer] * learningRate

            # Calculate gradients for every neuron in the next layer you're going to adjust
            if type(network[currentLayer - 1]) == Activation_ReLU:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    for neuronPreviousLayer in range(len(network[currentLayer - 2].output[0])): # Per neuron in previous normal layer (skips activation layer)
                        if network[currentLayer - 2].output[0][neuronPreviousLayer] > 0:
                            gradients[currentLayer - 2][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]
                        else:
                            gradients[currentLayer - 2][neuronPreviousLayer] = 0

                currentLayer -= 2
            else:
                for neuronCurrentLayer in range(len(network[currentLayer].output[0])): # Per neuron in current layer
                    for neuronPreviousLayer in range(len(network[currentLayer - 1].output[0])): # Per neuron in previous layer
                        gradients[currentLayer - 1][neuronPreviousLayer] += network[currentLayer].weights[neuronPreviousLayer][neuronCurrentLayer] * gradients[currentLayer][neuronCurrentLayer]

                currentLayer -= 1

    #print("Error: ", (network[len(network) - 1].output[0] - desired))
    #error = network[len(network) - 1].output[0] - desired
    #print("Gradients total: \n", gradients)

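# In equation form, a summary of what backwards() above does (for reference, read off
# the loops rather than taken from any external source): with learning rate lr,
# previous-entry outputs a_i and per-neuron gradients g_j, every layer except the
# first is updated as
#     b_j  -= g_j * lr
#     w_ij -= a_i * g_j * lr
# and the gradient handed one step back is
#     g_i = sum_j w_ij * g_j                        (plain Layer behind)
#     g_i = sum_j w_ij * g_j if z_i > 0, else 0     (Activation_ReLU behind, where z_i
#                                                     is the linear output feeding it)
# The seed gradient for the output layer is the batch-averaged (prediction - target),
# i.e. the derivative of 0.5 * (prediction - target)^2 per output neuron. Note that the
# loop stops before index 0, so the first layer is only updated once the commented-out
# block above is enabled.

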
#Create neural network
layer1 = Layer(1, 1)
layer1.biases = 1/layer1.biases
layer1.weights = 1/layer1.weights

layer2 = Layer(1, 1)
activation2 = Activation_ReLU()


#Train the network
trainNetwork(100, 32, 1)

testInput = 9817264
layer1.forward(testInput)
layer2.forward(layer1.output)
print("Guess: ", layer2.output)
print("Error", layer2.output - testInput*2)
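
# A minimal sketch of wiring the Activation_ReLU layer in, following the docstring's
# "try it out" note. This is an untested assumption about the intended usage:
# trainNetwork() is hard-wired to the global layer1/layer2, so the sketch runs a single
# manual forward/backward pass instead, and the hiddenLayer/relu/outputLayer names are
# illustrative only. As written, backwards() nudges outputLayer but leaves hiddenLayer
# untouched (the first-layer update is still commented out).
hiddenLayer = Layer(1, 3)
relu = Activation_ReLU()
outputLayer = Layer(3, 1)

x = np.array([[5.0]])
hiddenLayer.forward(x)
relu.forward(hiddenLayer.output)
outputLayer.forward(relu.output)

reluError = outputLayer.output - 2 * x  # same y = 2x target as above
backwards([hiddenLayer, relu, outputLayer], reluError)
print("Guess with ReLU network: ", outputLayer.output)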