Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import numpy as np
- import Layer
- import math
# Gradient-descent hyperparameters shared by Network.modifyWeights.
learningRate = 0.05  # step size applied to each weight/bias update
momentum = 0.9  # fraction of the previous update blended into the new one
class Network:
    """A small feed-forward network of affine and RBF layers.

    ``layersArray`` is a sequence (list or numpy array) of layer objects:
    either ``Layer.affineLayer`` instances (``weights``, optional ``bias``,
    ``activator``) or RBF-style layers (``centroids``, ``widths``).
    Layers store their own ``input``/``z``/``output``/``error`` state, which
    ``simulate`` threads through forward pass, backprop and weight update.
    """

    def __init__(self, layersArray):
        self.layers = layersArray

    def activationFun(self, values, function):
        """Apply the named activation element-wise and return a new matrix.

        'lin' is the identity; 'sig' is the logistic sigmoid.  Any other
        name falls through and returns None (original behaviour kept).
        """
        if function == 'lin':
            return np.asmatrix(np.array(values, dtype=float))
        elif function == 'sig':
            return np.asmatrix(1.0 / (1.0 + np.exp(-np.asarray(values, dtype=float))))

    def activationFunDerivative(self, values, function):
        """Element-wise derivative of the named activation at ``values``.

        For 'sig' this is sig(v) * (1 - sig(v)), so ``values`` must be the
        pre-activation input z, not an already-sigmoided output.
        """
        if function == 'lin':
            return np.asmatrix(np.ones(values.shape, dtype=float))
        elif function == 'sig':
            s = self.activationFun(values, 'sig')
            return np.asmatrix(np.multiply(s, 1.0 - s))

    def _layerOutput(self, layer, inputMatrix):
        """Compute and store one layer's output from ``inputMatrix``.

        Affine layers: output = f(W * x [+ b]).  RBF layers: one Gaussian
        unit per centroid, exp(-width * ||centroid - x||^2).
        """
        if isinstance(layer, Layer.affineLayer):
            layer.z = layer.weights * inputMatrix
            if layer.isBias:
                layer.z = layer.z + layer.bias
            layer.output = self.activationFun(layer.z, layer.activator)
        else:
            centroids = layer.centroids
            widths = layer.widths
            layer.output = np.asmatrix(np.zeros((centroids.shape[0], 1), dtype=float))
            # Iterating a matrix yields its rows as 1 x n matrices.
            for k, centroid in enumerate(centroids):
                dist2 = 0.0  # squared Euclidean distance to this centroid
                for j in range(centroid.shape[1]):
                    dist2 += (centroid[0, j] - inputMatrix[j, 0]) ** 2
                layer.output[k, 0] = math.exp(-widths[k, 0] * dist2)

    def forwardPropagate(self, inputMatrix):
        """Run ``inputMatrix`` through every layer, storing per-layer state.

        Fixes in this revision: hidden RBF layers now (a) write into their
        OWN ``output`` (the original wrote into layers[0].output), (b) use
        their own input rather than the network input, and (c) no longer
        clobber the layer-loop index with the inner distance loop.
        """
        self.layers[0].input = inputMatrix
        self._layerOutput(self.layers[0], inputMatrix)
        for i in range(1, len(self.layers)):
            self.layers[i].input = self.layers[i - 1].output
            self._layerOutput(self.layers[i], self.layers[i].input)

    def computeErrors(self, outputMatrix):
        """Backpropagate errors from the target ``outputMatrix``.

        Output-layer error is (target - output); hidden affine layers get
        the next layer's error pulled back through its weights.  Non-affine
        layers are skipped, as in the original.
        """
        self.layers[-1].error = outputMatrix - self.layers[-1].output
        for i in reversed(range(len(self.layers) - 1)):
            if isinstance(self.layers[i], Layer.affineLayer):
                self.layers[i].error = self.layers[i + 1].weights.T * self.layers[i + 1].error

    def modifyWeights(self):
        """Apply one momentum-SGD update to every affine layer.

        delta = lr * (f'(z) .* error) * input^T + momentum * previous delta.
        The derivative is now evaluated at the pre-activation ``z``; the
        original evaluated it at ``output``, double-applying the sigmoid.
        """
        for i in range(len(self.layers)):
            layer = self.layers[i]
            if not isinstance(layer, Layer.affineLayer):
                continue  # RBF layers carry no trainable weights here
            gradient = np.multiply(
                self.activationFunDerivative(layer.z, layer.activator),
                layer.error)
            # Neuron weights
            layer.weightsChanges = (learningRate * gradient * layer.input.T
                                    + momentum * layer.weightsChanges)
            layer.weights = layer.weights + layer.weightsChanges
            # Bias weights
            if layer.isBias:
                layer.biasChanges = learningRate * gradient + momentum * layer.biasChanges
                layer.bias = layer.bias + layer.biasChanges

    def simulate(self, inputMatrix, outputMatrix):
        """One training step: forward pass, error backprop, weight update."""
        self.forwardPropagate(inputMatrix)
        self.computeErrors(outputMatrix)
        self.modifyWeights()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement