Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- from math import exp
- from random import random
class Network:
    """A fully-connected feed-forward neural network with one hidden layer,
    trained by stochastic gradient descent with backpropagation.

    Neurons are stored as dicts: 'weights' holds the input weights with the
    bias as the LAST element; 'output' and 'delta' are filled in during the
    forward and backward passes respectively.
    """

    def __init__(self, n_inputs, n_hidden, n_outputs, n_epoch, learning_rate):
        self.layers = []
        self.n_outputs = n_outputs
        # Each neuron gets one weight per input plus a trailing bias weight.
        self.hidden_layer = [{'weights': [random() for _ in range(n_inputs + 1)]} for _ in range(n_hidden)]
        self.layers.append(self.hidden_layer)
        self.output_layer = [{'weights': [random() for _ in range(n_hidden + 1)]} for _ in range(n_outputs)]
        self.layers.append(self.output_layer)
        self.learning_rate = learning_rate
        self.epochs = n_epoch

    @staticmethod
    def activate(weights, inputs) -> float:
        """Weighted sum of `inputs` plus the bias (stored as the last weight)."""
        return weights[-1] + sum(weights[i] * inputs[i] for i in range(len(weights) - 1))

    @staticmethod
    def transfer(activation: float) -> float:
        """Sigmoid transfer function."""
        return 1.0 / (1.0 + exp(-activation))

    def forward_propagate(self, row):
        """Propagate `row` through the network; return the output activations.

        Trailing extra elements of `row` (e.g. the class label) are ignored
        because each neuron reads only as many inputs as it has weights.
        """
        inputs = row
        for layer in self.layers:
            new_inputs = []
            for neuron in layer:
                activation = self.activate(neuron['weights'], inputs)
                neuron['output'] = self.transfer(activation)
                new_inputs.append(neuron['output'])
            inputs = new_inputs
        return inputs

    @staticmethod
    def transfer_derivative(output):
        """Derivative of the sigmoid, expressed in terms of its output."""
        return output * (1.0 - output)

    def backward_propagate_error(self, expected):
        """Compute each neuron's 'delta' from the expected one-hot target."""
        for i in reversed(range(len(self.layers))):
            layer = self.layers[i]
            errors = []
            if i != len(self.layers) - 1:
                # Hidden layer: error is the delta-weighted sum of the
                # next layer's connections into this neuron.
                for j in range(len(layer)):
                    error = 0.0
                    for neuron in self.layers[i + 1]:
                        error += neuron['weights'][j] * neuron['delta']
                    errors.append(error)
            else:
                # Output layer: error is the difference from the target.
                for j in range(len(layer)):
                    neuron = layer[j]
                    errors.append(expected[j] - neuron['output'])
            for j in range(len(layer)):
                neuron = layer[j]
                neuron['delta'] = errors[j] * self.transfer_derivative(neuron['output'])

    def update_weights(self, row):
        """Apply one SGD step using the deltas from backpropagation."""
        for i in range(len(self.layers)):
            inputs = row[:-1]
            if i != 0:
                inputs = [neuron['output'] for neuron in self.layers[i - 1]]
            for neuron in self.layers[i]:
                for j in range(len(inputs)):
                    neuron['weights'][j] += self.learning_rate * neuron['delta'] * inputs[j]
                # Bias weight is updated without an input term.
                neuron['weights'][-1] += self.learning_rate * neuron['delta']

    def train(self, train):
        """Train for `self.epochs` passes over `train`, printing SSE per epoch.

        Each row is the input features followed by a one-hot label list.
        """
        for epoch in range(self.epochs):
            sum_error = 0
            for row in train:
                outputs = self.forward_propagate(row)
                expected = [0 for _ in range(self.n_outputs)]
                # BUG FIX: iterate over the WHOLE one-hot label. The original
                # used range(len(row[-1]) - 1), which skipped the last class,
                # so those rows trained against an all-zero target.
                for i in range(len(row[-1])):
                    if row[-1][i] == 1:
                        expected[i] = 1
                sum_error += sum((expected[i] - outputs[i]) ** 2 for i in range(len(expected)))
                self.backward_propagate_error(expected)
                self.update_weights(row)
            print('>epoch = %d. error = %.3f' % (epoch + 1, sum_error))

    def predict(self, row):
        """Return the index of the most activated output neuron."""
        outputs = self.forward_propagate(row)
        return outputs.index(max(outputs))

    def test(self, test_data):
        """Return classification accuracy on `test_data`.

        BUG FIX: the original compared the predicted index with the one-hot
        label list (int == list), which is always False, so accuracy was
        always reported as 0.0. A prediction is correct when the one-hot
        label has a 1 at the predicted index.
        """
        score = sum(1 for inputs in test_data if inputs[-1][self.predict(inputs)] == 1)
        return score / len(test_data)
def read_data(path='sensor_readings_24.data'):
    """Parse the wall-following-robot sensor dataset.

    Each non-empty line holds comma-separated sensor readings followed by a
    class-label string. Readings are converted to float; the label is
    replaced by its one-hot encoding and appended as the row's last element.

    Improvements over the original: the file path is a parameter (with the
    original hard-coded name as default); the label is taken from the LAST
    column instead of assuming exactly 24 readings, so files with a
    different number of sensors parse correctly; blank lines are skipped
    instead of crashing on float('').
    """
    expected_data = {
        "Move-Forward": [1, 0, 0, 0],
        "Slight-Right-Turn": [0, 1, 0, 0],
        "Sharp-Right-Turn": [0, 0, 1, 0],
        "Slight-Left-Turn": [0, 0, 0, 1],
    }
    parsed_data = []
    with open(path, 'r') as f:
        for line in f:
            fields = line.strip().split(',')
            if fields == ['']:
                continue  # skip blank lines
            row = [float(value) for value in fields[:-1]]
            row.append(expected_data[fields[-1]])
            parsed_data.append(row)
    return parsed_data
def dataset_minmax(dataset):
    """Return a [minimum, maximum] pair for every column of `dataset`."""
    stats = []
    for column in zip(*dataset):
        stats.append([min(column), max(column)])
    return stats
def normalize_dataset(dataset):
    """Rescale every feature column of `dataset` in place to [0, 1].

    The last element of each row (the label) is left untouched.

    BUG FIX: a constant column (min == max) made the original division
    raise ZeroDivisionError; such columns are now mapped to 0.0. The
    column statistics are computed inline so the guard can share them.
    An empty dataset is a no-op.
    """
    if not dataset:
        return
    minmax = [[min(column), max(column)] for column in zip(*dataset)]
    for row in dataset:
        for i in range(len(row) - 1):
            low, high = minmax[i]
            span = high - low
            # Constant column -> 0.0 instead of dividing by zero.
            row[i] = (row[i] - low) / span if span else 0.0
if __name__ == '__main__':
    # Load and normalize the sensor dataset, split it into a training
    # portion and a held-out portion, then train the network and report
    # its accuracy on the held-out rows.
    dataset = read_data()
    normalize_dataset(dataset)
    training_rows = dataset[:5000]
    held_out_rows = dataset[5000:]
    net = Network(24, n_hidden=15, n_outputs=4, learning_rate=0.2, n_epoch=100)
    net.train(training_rows)
    print(net.test(held_out_rows))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement