import numpy
from networkLayers import *
from transferFunctions import *


def der_sigmoid(x):
    return x * (1 - x)
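
# Note: der_sigmoid expects the sigmoid *activation* a = sigmoid(z) rather
# than the pre-activation z, since d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z)).
# The backpropagation code below therefore feeds it layer outputs directly.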


class NeuralNetwork(object):
    """
    A class which streamlines the output through layers and error calculation.
    """

    def __init__(self):
        """
        Initialize the layers of the neural network to an empty array.
        """
        self.layers = []

    def addLayer(self, layer):
        """
        Add a layer to the neural network in sequential fashion.
        """
        self.layers.append(layer)

    def output(self, x, all_outputs=False):
        """
        Calculate the output for a single input instance x (one row from
        the training or test set).
        """

        # For each layer - calculate the output of that layer and use it
        # as input to the following layer. The method returns the output
        # of the last layer (or, with all_outputs=True, the outputs of all
        # layers, starting with the input vector x itself).

        output = x
        layers_output = [output]
        for layer in self.layers:
            weights = layer.getWeights()
            biases = layer.getBiases()
            # Affine transformation followed by the sigmoid transfer function.
            output = numpy.dot(output, weights) + biases
            output = 1 / (1 + numpy.exp(-output))
            layers_output.append(output)
        if all_outputs:
            return layers_output
        else:
            return output
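
    # Shape sketch (assuming each layer stores an (n_in, n_out) weight matrix,
    # as numpy.dot(output, weights) implies): for a batch X of shape (m, n_in),
    # the first layer produces an (m, n_1) array, the next an (m, n_2) array,
    # and so on; layers_output then holds the input plus one such array per layer.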

    def outputs(self, X):
        """
        For a given vector of input instances X (the training or test set),
        return the vector of outputs for all the input instances.
        """

        # Input: vector X (train / test set)
        # Output: vector y_pred (predicted output values of the target function)

        return [self.output(x) for x in X]

    def error(self, prediction, y):
        """
        Calculates the error for a single example in the train/test set.
        The default error is the squared error; the mean square error (MSE)
        is computed over all the training instances in total_error.
        """
        return (prediction - y) ** 2

    def total_error(self, predictions, Y):
        """
        Calculates the total error for ALL the examples in the train/test set.
        """

        # Input: vector of predicted values (predictions)
        #        vector of actual values (Y) from the training / test set
        # Output: The Mean Square Error for all the instances

        # NOTE: The output HAS to be a single floating point value!

        error = 0
        for pred, y in zip(predictions, Y):
            # Sum over the output dimensions so the result stays a single number.
            error += numpy.sum(self.error(pred, y))
        # Average over the number of examples in the set.
        return error / len(Y)

    def forwardStep(self, X, Y):
        """
        Run the inputs X (train/test set) through the network, and calculate
        the error on the given true target function values Y.
        """
        outputs = self.outputs(X)
        return self.total_error(outputs, Y)

    def size(self):
        """
        Return the total number of weights in the network.
        """
        return sum(layer.size() for layer in self.layers)

    def getWeightsFlat(self):
        """
        Return a 1-d representation of all the weights in the network.
        """
        flatWeights = numpy.array([])
        for layer in self.layers:
            flatWeights = numpy.append(flatWeights, layer.getWeightsFlat())
        return flatWeights

    def setWeights(self, flat_vector):
        """
        Set the weights for all layers in the network.
        """
        # The first layers come first in the flat vector.
        for layer in self.layers:
            layer_weights = flat_vector[:layer.size()]
            layer.setWeights(layer_weights)
            flat_vector = flat_vector[layer.size():]
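
    # Example (hypothetical sizes): with two layers whose size() is 6 and 3,
    # a flat vector of length 9 is split as flat_vector[:6] for the first
    # layer and flat_vector[6:] for the second, matching the order in which
    # getWeightsFlat concatenates them.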

    def backPropagation(self, X, y, eta=1):
        # Earlier draft of the backward pass, kept commented out:
        #
        # ones = numpy.ones((1, X.shape[0]))
        # pred = self.output(X, True)
        #
        # sigma = []
        # for k in reversed(range(0, len(self.layers))):
        #     if k == (len(self.layers) - 1):
        #         output_layer = self.layers[k]
        #
        #         sigma.append(output_layer.sigma(pred[k+1], y))
        #         weights = output_layer.getWeights()
        #         biases = output_layer.getBiases().reshape(1, output_layer.getBiases().shape[0])
        #         a = (pred[k].T.dot(sigma[-1]))
        #         b = eta * a
        #         weights += b
        #         biases += numpy.multiply(eta, ones.dot(sigma[-1]))
        #         self.layers[k].setWeightsAndBiases(weights, biases)
        #     else:
        #         output_layer = self.layers[k]
        #         sigma.append(output_layer.sigma(pred[k+1], sigma_k_plus_1=sigma[-1], w=self.layers[k+1].getWeights()))
        #         weights = output_layer.getWeights()
        #         biases = output_layer.getBiases().reshape(1, output_layer.getBiases().shape[0])
        #         weights += eta * (pred[k].T.dot(sigma[-1]))
        #         biases += eta * ones.dot(sigma[-1])
        #         self.layers[k].setWeightsAndBiases(weights, biases)

        # Forward pass: collect the activations of every layer. pred[0] is
        # the input X itself and pred[-1] is the network output; all deltas
        # below are computed from these pre-update activations.
        pred = self.output(X, all_outputs=True)
        output = pred[-1]
        # Backward pass, from the last layer towards the first.
        for i in range(1, len(self.layers) + 1):
            if i == 1:
                # Output layer: the delta comes from the prediction error.
                error = y - output
                delta = error * der_sigmoid(output)
            else:
                # Hidden layer: propagate the delta back through the weights
                # of the layer above it.
                error = numpy.dot(delta, self.layers[-i + 1].getWeights().T)
                delta = error * der_sigmoid(pred[-i])
            # The weight gradient is (input to this layer)^T . delta.
            dw = numpy.dot(pred[-i - 1].T, delta)
            db = numpy.sum(delta, axis=0, keepdims=True)
            layer = self.layers[-i]
            layer.setWeightsAndBiases(layer.getWeights() + eta * dw,
                                      layer.getBiases().reshape(1, -1) + eta * db)
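
        # Sanity-check sketch (editor's suggestion, using only methods defined
        # in this class): the analytic step above can be validated with a
        # central finite difference on a network instance net, e.g.
        #
        #   flat = net.getWeightsFlat()
        #   eps = 1e-5
        #   flat[0] += eps
        #   net.setWeights(flat)
        #   e_plus = net.forwardStep(X, y)
        #   flat[0] -= 2 * eps
        #   net.setWeights(flat)
        #   e_minus = net.forwardStep(X, y)
        #   grad_first_weight = (e_plus - e_minus) / (2 * eps)
        #
        # which should agree (up to sign and the 1/N averaging) with dw[0, 0]
        # computed before any weights are updated.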

    def trainNetwork(self, X, y, type='batch', eta=1, maxiter=10000):
        if type == 'group':
            # Full-batch gradient descent: every iteration uses the whole set.
            for i in range(0, maxiter):
                if i % 100 == 0:
                    print(i)
                self.backPropagation(X, y, eta)

        if type == 'stochastic':
            # Stochastic gradient descent: one example at a time.
            for i in range(0, maxiter):
                for j in range(len(X)):
                    # Slice instead of indexing so the example keeps its 2-d
                    # shape for the matrix operations in backPropagation.
                    self.backPropagation(X[j:j + 1], y[j:j + 1], eta)

        if type == 'batch':
            # Mini-batch gradient descent over fixed 20% slices of the set.
            for i in range(0, maxiter):
                step = 0.2
                current = 0
                while current < 1:
                    begin = int(current * len(X))
                    end = int((current + step) * len(X))
                    X_batch = X[begin:end]
                    y_batch = y[begin:end]
                    self.backPropagation(X_batch, y_batch, eta)
                    current = current + step
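

# Usage sketch (hypothetical, since networkLayers is not shown here): assuming
# it provides a layer class built as NetworkLayer(n_inputs, n_outputs) with
# the getters and setters used above, a small 2-2-1 network can learn XOR:
#
#   net = NeuralNetwork()
#   net.addLayer(NetworkLayer(2, 2))
#   net.addLayer(NetworkLayer(2, 1))
#
#   X = numpy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
#   y = numpy.array([[0], [1], [1], [0]])
#
#   net.trainNetwork(X, y, type='group', eta=1, maxiter=10000)
#   print(net.output(X))          # should approach [[0], [1], [1], [0]]
#   print(net.forwardStep(X, y))  # mean square error after training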