Advertisement
Guest User

Untitled

a guest
Dec 9th, 2018
70
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 4.18 KB | None | 0 0
  1. import numpy as np
  2.  
  3. ETA = 0.1
  4.  
  5. class Layer:
  6.     def __init__(self, input_size, output_size):
  7.         self.input_size = input_size
  8.         self.output_size = output_size
  9.         self.weights = np.random.uniform(-0.4, 0.4, (input_size, output_size))
  10.         self.biases = np.random.uniform(-0.4, 0.4,  (1, output_size))
  11.         self.outputs_ = None
  12.         self.delta_ = None
  13.  
  14.     def sigmoid(self, x, derivative=False):
  15.         sigm = 1. / (1. + np.exp(-x))
  16.         if derivative:
  17.             return sigm * (1. - sigm)
  18.         return sigm
  19.  
  20.     def outputs(self, data):
  21.         activation = np.matmul(data, self.weights) - self.biases
  22.         self.outputs_ = self.sigmoid(activation)
  23.         return self.outputs_
  24.  
  25. class InputLayer(Layer):
  26.     def __init__(self, input_size, output_size):
  27.         super().__init__(input_size, output_size)
  28.  
  29.     # InputLayer only passes the data forward
  30.     def outputs(self, data):
  31.         return data
  32.  
  33.     def __repr__(self):
  34.         return f"InputLayer {self.input_size} -> {self.output_size}"
  35.  
class HiddenLayer(Layer):
    """Hidden layer: computes its delta and weight update during backprop.

    NOTE(review): in `backwards_pass` the argument `layer_before` is taken
    from the *reversed* layer list, so it is the neighbouring layer closer
    to the OUTPUT, not the layer that feeds this one in the forward pass.
    """

    def __init__(self, input_size, output_size):
        super().__init__(input_size, output_size)

    def calculate_delta(self, layer_before):
        # Standard backprop delta: y * (1 - y) * (delta_next @ W_next.T),
        # where `layer_before` is the downstream layer (see class note).
        y = self.outputs_
        self.delta_ = y * (1 - y) * np.matmul(layer_before.delta_, layer_before.weights.T)

    def update_weights(self, layer_before):
        # NOTE(review): this update looks wrong — TODO confirm. A gradient
        # step for this layer's weights should be inputs.T @ self.delta_
        # (using this layer's forward INPUTS and its OWN delta), producing
        # an (input_size, output_size) array. Here matmul multiplies
        # (batch, next_out) by (batch, this_out), which only conforms when
        # next_out == batch, and uses the downstream layer's delta.
        y = self.outputs_
        self.weights += ETA * np.matmul(layer_before.delta_, y)
        # Negated sum matches the "activation = Wx - b" sign convention, but
        # NOTE(review): it sums the DOWNSTREAM delta (width next_out) into
        # biases of width this_out — shapes disagree unless they happen to
        # match; presumably self.delta_ was intended.
        self.biases += ETA * -np.sum(layer_before.delta_, axis=0)

    def __repr__(self):
        return f"HiddenLayer {self.input_size} -> {self.output_size}"
  51.  
class OutputLayer(Layer):
    """Final layer: its delta is driven directly by the training labels."""

    def __init__(self, input_size, output_size):
        super().__init__(input_size, output_size)

    def calculate_delta(self, labels):
        # delta = y * (1 - y) * (t - y): the derivative of the squared error
        # 0.5 * (t - y)^2 pushed back through the sigmoid.
        y = self.outputs_
        self.delta_ = y * (1 - y) * (labels - y)

    def update_weights(self):
        # NOTE(review): this update looks wrong — TODO confirm. The weight
        # gradient should use the layer's forward INPUTS (the previous
        # layer's outputs), i.e. inputs.T @ self.delta_, not this layer's
        # own outputs y. matmul((batch, out), (batch, out)) also fails to
        # conform unless out == batch. Those inputs are not stored anywhere,
        # so a fix needs changes to the forward pass as well.
        y = self.outputs_
        self.weights += ETA * np.matmul(self.delta_, y)
        # Negated sum matches "activation = Wx - b"; NOTE(review): unlike
        # HiddenLayer this sums over ALL axes (no axis=0), collapsing the
        # per-unit bias gradient to a scalar — presumably unintended.
        self.biases += ETA * -np.sum(self.delta_)

    def __repr__(self):
        return f"OutputLayer {self.input_size} -> {self.output_size}"
  67.  
  68. class Algorithm:
  69.     def __init__(self, layer_sizes):
  70.         self.layer_sizes = layer_sizes
  71.         self.layers = []
  72.         self.add_layers(layer_sizes)
  73.  
  74.     def add_layers(self, layer_sizes):
  75.         self.add_layer(InputLayer(layer_sizes[0], layer_sizes[0]))
  76.         for i in range(0, len(layer_sizes) - 2):
  77.             self.add_layer(HiddenLayer(layer_sizes[i], layer_sizes[i + 1]))
  78.         self.add_layer(OutputLayer(layer_sizes[-2], layer_sizes[-1]))
  79.  
  80.     def add_layer(self, layer):
  81.         self.layers.append(layer)
  82.  
  83.     def forward_pass(self, dataset):
  84.         data = dataset
  85.         for layer in self.layers:
  86.             data = layer.outputs(data)
  87.         return data
  88.  
  89.     def backwards_pass(self, labels):
  90.         reversed_layers = list(reversed(self.layers))
  91.         # Calculate deltas
  92.         for i, layer in enumerate(reversed_layers):
  93.             if type(layer) == OutputLayer:
  94.                 layer.calculate_delta(labels)
  95.             elif type(layer) == HiddenLayer:
  96.                 layer_before = reversed_layers[i - 1]
  97.                 layer.calculate_delta(layer_before)
  98.  
  99.         # Update weights
  100.         for i, layer in enumerate(reversed_layers):
  101.             if type(layer) == OutputLayer:
  102.                 layer.update_weights()
  103.             elif type(layer) == HiddenLayer:
  104.                 layer_before = reversed_layers[i - 1]  
  105.                 layer.update_weights(layer_before)
  106.  
  107.     def calculate_error(self, dataset, labels):
  108.         prediction = self.forward_pass(dataset)
  109.         return 0.5 * np.mean((prediction - labels) ** 2)
  110.  
  111.     def predict(self, datapoint):
  112.         datapoint = np.array(datapoint)
  113.         output = self.forward_pass(datapoint)
  114.         return output
  115.  
  116. class BackpropAlg(Algorithm):
  117.     def __init__(self, layer_sizes):
  118.         super().__init__(layer_sizes)
  119.  
  120.     def train(self, dataset, labels, epochs = 1500):
  121.         dataset = np.array(dataset)
  122.         labels = np.array(labels)
  123.         current_epoch = 1
  124.         while current_epoch <= epochs:
  125.             self.forward_pass(dataset)
  126.             self.backwards_pass(labels)
  127.             if current_epoch % 500 == 0:
  128.                 error = self.calculate_error(dataset, labels)
  129.                 print(f"Epoch {current_epoch} -> error: {error}")
  130.             current_epoch += 1
  131.  
class StochasticBackpropAlg(Algorithm):
    """Stochastic (per-sample) backprop variant.

    NOTE(review): `train` is an unimplemented placeholder.
    """

    def __init__(self, layer_sizes):
        super().__init__(layer_sizes)

    def train(self, dataset, labels, epochs = 1500):
        # TODO: implement per-sample (stochastic) training.
        pass
  138.  
class MiniBatchBackpropAlg(Algorithm):
    """Mini-batch backprop variant.

    NOTE(review): `train` is an unimplemented placeholder.
    """

    def __init__(self, layer_sizes):
        super().__init__(layer_sizes)

    def train(self, dataset, labels, epochs = 1500):
        # TODO: implement mini-batch training.
        pass
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement