import numpy

from networkLayers import *
from transferFunctions import *


def der_sigmoid(x):
    # Derivative of the sigmoid, expressed through the sigmoid *output*:
    # if x = sigmoid(z), then sigmoid'(z) = x * (1 - x).
    return x * (1 - x)
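

# Quick sanity check for der_sigmoid (illustrative sketch, not part of the
# original paste): for s = sigmoid(z), der_sigmoid(s) equals s * (1 - s).
def _demo_der_sigmoid():
    s = 1.0 / (1.0 + numpy.exp(-0.5))  # sigmoid of z = 0.5
    assert abs(der_sigmoid(s) - s * (1.0 - s)) < 1e-12
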

class NeuralNetwork(object):
    """
    A class which chains input through the layers in sequence and handles
    the error calculation.
    """

    def __init__(self):
        """
        Initialize the layers of the neural network to an empty list.
        """
        self.layers = []

    def addLayer(self, layer):
        """
        Add a layer to the neural network in sequential fashion.
        """
        self.layers.append(layer)

    def output(self, x, all_outputs=False):
        """
        Calculate the output for a single input instance x (one row from
        the training or test set). If all_outputs is True, return a list
        with every layer's activation (the input x first) instead of just
        the final output.
        """
        # For each layer, calculate the output of that layer and use it as
        # input to the following layer. The input to the first layer is the
        # vector x; the sigmoid transfer is applied inline.
        output = x
        layers_output = [output]
        for layer in self.layers:
            weights = layer.getWeights()
            biases = layer.getBiases()
            output = numpy.dot(output, weights) + biases
            output = 1 / (1 + numpy.exp(-output))  # sigmoid
            layers_output.append(output)
        if all_outputs:
            return layers_output
        return output
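
    # Shape sketch (comment only, not part of the original paste): for a
    # single instance x of shape (d,) and a layer with weights of shape
    # (d, h), numpy.dot(x, weights) + biases gives an h-unit activation;
    # a 2-d batch X of shape (n, d) yields shape (n, h) from the very
    # same line, since the biases broadcast over the batch.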

    def outputs(self, X):
        """
        For a given vector of input instances X (the training or test set),
        return the vector of outputs for all the input instances.
        """
        # Input: vector X (train / test set)
        # Output: vector y_pred (predicted output values of the target function)
        return [self.output(x) for x in X]

    def error(self, prediction, y):
        """
        Calculate the error for a single example in the train/test set.
        Returns the squared error; the mean square error (MSE) is taken
        over all the instances in total_error.
        """
        return (prediction - y) ** 2
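
    # Worked example (comment only, not part of the original paste): a
    # prediction of 0.8 against a target of 1.0 gives (0.8 - 1.0) ** 2 = 0.04.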

    def total_error(self, predictions, Y):
        """
        Calculate the total error for ALL the examples in the train/test set.
        """
        # Input: vector of predicted values (predictions)
        #        vector of actual values (Y) from the training / test set
        # Output: the Mean Square Error over all the instances
        # NOTE: the output HAS to be a single floating point value!
        error = 0.0
        for pred, y in zip(predictions, Y):
            # numpy.sum collapses multi-output predictions to a scalar
            error += numpy.sum(self.error(pred, y))
        # Average over the number of examples, not the number of weights
        return error / len(Y)
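
    # Worked example (comment only, not part of the original paste):
    # predictions [0.8, 0.2] against targets [1.0, 0.0] give
    # (0.04 + 0.04) / 2 = 0.04 as the MSE.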

    def forwardStep(self, X, Y):
        """
        Run the inputs X (train/test set) through the network, and calculate
        the error on the given true target function values Y.
        """
        outputs = self.outputs(X)
        return self.total_error(outputs, Y)

    def size(self):
        """
        Return the total number of weights in the network.
        """
        return sum(layer.size() for layer in self.layers)

    def getWeightsFlat(self):
        """
        Return a 1-d representation of all the weights in the network.
        """
        flatWeights = numpy.array([])
        for layer in self.layers:
            flatWeights = numpy.append(flatWeights, layer.getWeightsFlat())
        return flatWeights

    def setWeights(self, flat_vector):
        """
        Set the weights for all layers in the network.
        """
        # The first layers come first in the flat vector.
        for layer in self.layers:
            layer.setWeights(flat_vector[:layer.size()])
            flat_vector = flat_vector[layer.size():]

    def backPropagation(self, X, y, eta=1):
        """
        Perform one backpropagation step on the instances X with targets y,
        updating every layer's weights and biases (learning rate eta).
        """
        # Forward pass once, keeping every layer's activation: pred[0] is
        # the input X itself, pred[-1] is the network output.
        pred = self.output(X, all_outputs=True)
        delta = None
        w_next = None
        for i in range(1, len(self.layers) + 1):
            layer = self.layers[-i]
            if i == 1:
                # Output layer: error taken directly against the targets.
                error = y - pred[-1]
            else:
                # Hidden layer: propagate the delta back through the next
                # layer's weights (as they were during the forward pass).
                error = numpy.dot(delta, w_next.T)
            delta = error * der_sigmoid(pred[-i])
            # Remember the pre-update weights for the next (earlier) layer.
            w_next = layer.getWeights()
            # The weight gradient uses the layer's *input*, pred[-i - 1].
            dw = numpy.dot(pred[-i - 1].T, delta)
            db = numpy.sum(delta, axis=0, keepdims=True)
            layer.setWeightsAndBiases(w_next + eta * dw,
                                      layer.getBiases() + eta * db)
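
    # Update rule used above (comment sketch, not part of the original
    # paste), writing a_k for pred[k], so a_0 = X and a_k is the activation
    # of layer k (layers numbered 1..L):
    #   output layer:  delta_L = (y - a_L) * der_sigmoid(a_L)
    #   hidden layer:  delta_k = (delta_{k+1} . W_{k+1}^T) * der_sigmoid(a_k)
    #   weights:       W_k += eta * (a_{k-1}^T . delta_k)
    #   biases:        b_k += eta * (column sums of delta_k)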

    def trainNetwork(self, X, y, type='batch', eta=1, maxiter=10000):
        """
        Train the network with backpropagation. type selects the update
        scheme: 'group' (full-batch updates), 'stochastic' (one instance
        at a time) or 'batch' (mini-batches of 20% of the data).
        """
        if type == 'group':
            for i in range(maxiter):
                if i % 100 == 0:
                    print(i)
                self.backPropagation(X, y, eta)
        elif type == 'stochastic':
            for i in range(maxiter):
                for j in range(len(X)):
                    # Slice with j:j + 1 so the instance keeps its 2-d shape.
                    self.backPropagation(X[j:j + 1], y[j:j + 1], eta)
        elif type == 'batch':
            for i in range(maxiter):
                step = 0.2
                current = 0
                while current < 1:
                    begin = int(current * len(X))
                    end = int((current + step) * len(X))
                    self.backPropagation(X[begin:end], y[begin:end], eta)
                    current += step
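

# Minimal usage sketch (not part of the original paste). The networkLayers
# module is not shown here, so _StubDenseLayer is an assumption: a fully
# connected sigmoid layer exposing only the methods NeuralNetwork calls.
class _StubDenseLayer(object):
    def __init__(self, n_in, n_out):
        self.weights = numpy.random.uniform(-1.0, 1.0, (n_in, n_out))
        self.biases = numpy.zeros((1, n_out))

    def getWeights(self):
        return self.weights

    def getBiases(self):
        return self.biases

    def size(self):
        return self.weights.size

    def getWeightsFlat(self):
        return self.weights.flatten()

    def setWeights(self, flat_weights):
        self.weights = numpy.asarray(flat_weights).reshape(self.weights.shape)

    def setWeightsAndBiases(self, weights, biases):
        self.weights = weights
        self.biases = biases


if __name__ == "__main__":
    _demo_der_sigmoid()

    # Learn XOR with a 2-3-1 network; the hyperparameters are illustrative.
    numpy.random.seed(0)
    X = numpy.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    y = numpy.array([[0.0], [1.0], [1.0], [0.0]])

    net = NeuralNetwork()
    net.addLayer(_StubDenseLayer(2, 3))
    net.addLayer(_StubDenseLayer(3, 1))
    net.trainNetwork(X, y, type='group', eta=1, maxiter=5000)
    print("MSE:", net.forwardStep(X, y))
    print("Predictions:", net.outputs(X))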