Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import numpy as np

# Learning rate shared by every weight/bias update.
ETA = 0.1


class Layer:
    """A fully connected layer with sigmoid activation.

    The forward pass computes ``sigmoid(data @ weights - biases)``.
    Weights and biases are initialised uniformly in [-0.4, 0.4).  The
    forward pass caches both the layer input and its output so that the
    backward pass can apply the delta rule (``dW = inputs.T @ delta``).
    """

    def __init__(self, input_size, output_size):
        self.input_size = input_size
        self.output_size = output_size
        self.weights = np.random.uniform(-0.4, 0.4, (input_size, output_size))
        self.biases = np.random.uniform(-0.4, 0.4, (1, output_size))
        self.inputs_ = None   # input of the last forward pass (cached for gradient updates)
        self.outputs_ = None  # activation of the last forward pass
        self.delta_ = None    # error signal, set during the backward pass

    def sigmoid(self, x, derivative=False):
        """Logistic function, or its derivative w.r.t. the pre-activation."""
        sigm = 1. / (1. + np.exp(-x))
        if derivative:
            return sigm * (1. - sigm)
        return sigm

    def outputs(self, data):
        """Forward pass: cache ``data`` and return ``sigmoid(data @ W - b)``."""
        self.inputs_ = data
        activation = np.matmul(data, self.weights) - self.biases
        self.outputs_ = self.sigmoid(activation)
        return self.outputs_


class InputLayer(Layer):
    """Pass-through layer: forwards the raw data unchanged."""

    def __init__(self, input_size, output_size):
        super().__init__(input_size, output_size)

    # InputLayer only passes the data forward
    def outputs(self, data):
        return data

    def __repr__(self):
        return f"InputLayer {self.input_size} -> {self.output_size}"


class HiddenLayer(Layer):
    """Hidden layer trained with the generalised delta rule."""

    def __init__(self, input_size, output_size):
        super().__init__(input_size, output_size)

    def calculate_delta(self, layer_before):
        """Backpropagate the error signal from the following layer.

        ``layer_before`` is the layer *after* this one in forward order
        (the caller iterates the reversed layer list).
        """
        y = self.outputs_
        self.delta_ = y * (1 - y) * np.matmul(layer_before.delta_, layer_before.weights.T)

    def update_weights(self, layer_before):
        # BUG FIX: the delta rule is dW = inputs.T @ (own delta).  The old
        # code multiplied the *next* layer's delta with this layer's
        # *outputs*, which is dimensionally and mathematically wrong.
        # ``layer_before`` is kept only for interface compatibility.
        self.weights += ETA * np.matmul(self.inputs_.T, self.delta_)
        # Activation is (data @ W - b), hence the minus sign on the bias step.
        self.biases += ETA * -np.sum(self.delta_, axis=0)

    def __repr__(self):
        return f"HiddenLayer {self.input_size} -> {self.output_size}"


class OutputLayer(Layer):
    """Output layer: its delta comes straight from the target labels."""

    def __init__(self, input_size, output_size):
        super().__init__(input_size, output_size)

    def calculate_delta(self, labels):
        """delta = sigmoid'(a) * (labels - y), i.e. the negative error gradient."""
        y = self.outputs_
        self.delta_ = y * (1 - y) * (labels - y)

    def update_weights(self):
        # BUG FIX: same delta-rule correction as HiddenLayer — use the
        # cached inputs, not the layer's own outputs.
        self.weights += ETA * np.matmul(self.inputs_.T, self.delta_)
        # BUG FIX: sum per output unit (axis=0) instead of over everything,
        # matching HiddenLayer.update_weights.
        self.biases += ETA * -np.sum(self.delta_, axis=0)

    def __repr__(self):
        return f"OutputLayer {self.input_size} -> {self.output_size}"
class Algorithm:
    """Base class for a feed-forward network built from Layer objects.

    ``layer_sizes`` lists the number of units per layer, e.g. ``[2, 3, 1]``
    builds an input layer (2 units), one hidden layer (2 -> 3) and an
    output layer (3 -> 1).
    """

    def __init__(self, layer_sizes):
        self.layer_sizes = layer_sizes
        self.layers = []
        self.add_layers(layer_sizes)

    def add_layers(self, layer_sizes):
        """Instantiate the input, hidden and output layers from the size list."""
        self.add_layer(InputLayer(layer_sizes[0], layer_sizes[0]))
        for i in range(len(layer_sizes) - 2):
            self.add_layer(HiddenLayer(layer_sizes[i], layer_sizes[i + 1]))
        self.add_layer(OutputLayer(layer_sizes[-2], layer_sizes[-1]))

    def add_layer(self, layer):
        self.layers.append(layer)

    def forward_pass(self, dataset):
        """Propagate ``dataset`` through every layer and return the final output."""
        data = dataset
        for layer in self.layers:
            data = layer.outputs(data)
        return data

    def backwards_pass(self, labels):
        """One backprop step: compute all deltas first, then update all weights."""
        reversed_layers = list(reversed(self.layers))
        # Calculate deltas.  All deltas are computed before any update so
        # the updates see pre-update weights.
        # FIX: isinstance instead of `type(x) == T` (standard type-check
        # idiom; behaviour is unchanged for these classes).
        for i, layer in enumerate(reversed_layers):
            if isinstance(layer, OutputLayer):
                layer.calculate_delta(labels)
            elif isinstance(layer, HiddenLayer):
                # reversed_layers[i - 1] is the layer *after* this one in
                # forward order.
                layer_before = reversed_layers[i - 1]
                layer.calculate_delta(layer_before)
        # Update weights.
        for i, layer in enumerate(reversed_layers):
            if isinstance(layer, OutputLayer):
                layer.update_weights()
            elif isinstance(layer, HiddenLayer):
                layer_before = reversed_layers[i - 1]
                layer.update_weights(layer_before)

    def calculate_error(self, dataset, labels):
        """Half mean squared error of the network prediction on ``dataset``."""
        prediction = self.forward_pass(dataset)
        return 0.5 * np.mean((prediction - labels) ** 2)

    def predict(self, datapoint):
        """Run a forward pass on a single datapoint (list or array)."""
        datapoint = np.array(datapoint)
        output = self.forward_pass(datapoint)
        return output
class BackpropAlg(Algorithm):
    """Full-batch backpropagation training."""

    def __init__(self, layer_sizes):
        super().__init__(layer_sizes)

    def train(self, dataset, labels, epochs=1500):
        """Run ``epochs`` full-batch forward/backward passes.

        Progress (half mean squared error) is printed every 500 epochs.
        """
        dataset = np.array(dataset)
        labels = np.array(labels)
        for epoch in range(1, epochs + 1):
            self.forward_pass(dataset)
            self.backwards_pass(labels)
            if epoch % 500 == 0:
                error = self.calculate_error(dataset, labels)
                print(f"Epoch {epoch} -> error: {error}")
class StochasticBackpropAlg(Algorithm):
    """Backpropagation with per-sample (stochastic) updates.

    NOTE(review): ``train`` is an unimplemented stub — calling it is a
    no-op and performs no training.
    """
    def __init__(self, layer_sizes):
        super().__init__(layer_sizes)
    def train(self, dataset, labels, epochs = 1500):
        # TODO: implement stochastic (one sample at a time) training.
        pass
class MiniBatchBackpropAlg(Algorithm):
    """Backpropagation with mini-batch updates.

    NOTE(review): ``train`` is an unimplemented stub — calling it is a
    no-op and performs no training.
    """
    def __init__(self, layer_sizes):
        super().__init__(layer_sizes)
    def train(self, dataset, labels, epochs = 1500):
        # TODO: implement mini-batch training (split dataset into batches).
        pass
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement