Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- """
- Small code snippet with a SUPER SLOW AND INEFFICIENT MLP IMPLEMENTATION.
- """
- from math import exp, tanh, sin, pi
- import numpy as np
- from random import random
class MLP:
    """
    Multi-Layer Perceptron (MLP) artificial neural network.

    Pure-Python implementation trained with online (per-row) gradient
    descent and backpropagation.  The network is a list of layers; each
    layer is a list of neurons and each neuron is a dict carrying its
    'weights' (the last weight is the bias) plus, once computed, its
    'output' and 'delta'.

    Activation codes (for output_function / hidden_function):
        0 = linear, 1 = sigmoid, 2 = tanh.
    """

    def __init__(self, nodes_in, nodes_out, nodes_per_hid_layer=None,
                 train_data=None, learn_rate=0.5, iteration_limit=10000,
                 target_error=0.01, classification=False, output_function=0,
                 hidden_function=1):
        """
        :param nodes_in: number of input features per training row.
        :param nodes_out: number of output neurons (targets or classes).
        :param nodes_per_hid_layer: neuron count per hidden layer
            (None/empty -> no hidden layers).
        :param train_data: rows of [inputs..., target(s)...].
        :param learn_rate: gradient-descent step size.
        :param iteration_limit: maximum number of training epochs.
        :param target_error: early-stop threshold on the mean squared error.
        :param classification: True -> one-hot targets built from the
            label in the last column; False -> regression.
        :param output_function: output-layer activation code (0/1/2).
        :param hidden_function: hidden-layer activation code (0/1/2).
        """
        # Fix: use None defaults instead of mutable default arguments,
        # which would be shared across every instance of the class.
        if nodes_per_hid_layer is None:
            nodes_per_hid_layer = []
        if train_data is None:
            train_data = []
        self.hidden_function = hidden_function
        self.output_function = output_function
        self.layers = len(nodes_per_hid_layer)  # number of hidden layers
        self.learn_rate = learn_rate
        self.epoch = iteration_limit
        self.nodes_out = nodes_out              # (was assigned twice originally)
        self.train_data = train_data
        self.error = []                         # per-epoch mean squared error history
        self.target_error = target_error
        self.classification = classification
        self.nodes_in = nodes_in
        self.nodes_per_hid_layer = nodes_per_hid_layer
        self.network = list()
        self.label_dict = dict()                # class label -> output-neuron index
        self.resetWeights()

    # Updates the training data
    def setTrainData(self, new_data):
        self.train_data = new_data

    # Updates the number of training epochs
    def setEpochs(self, p):
        self.epoch = p

    # Updates the learning rate
    def setLearnRate(self, lr):
        self.learn_rate = lr

    def resetWeights(self):
        """
        Rebuild the network with fresh random weights in [0, 1).

        Each neuron gets one weight per input of its layer plus a
        trailing bias weight.
        """
        self.network = []
        previous_nodes = self.nodes_in
        for nodes in self.nodes_per_hid_layer:
            hidden_layer = [{'weights': [random() for _ in range(previous_nodes + 1)]}
                            for _ in range(nodes)]
            self.network.append(hidden_layer)
            previous_nodes = nodes
        output_layer = [{'weights': [random() for _ in range(previous_nodes + 1)]}
                        for _ in range(self.nodes_out)]
        self.network.append(output_layer)

    def forward_propagate(self, inputs):
        """
        Propagate `inputs` through the network and return the list of
        output-layer activations.  Extra trailing elements in `inputs`
        (e.g. the target columns of a training row) are ignored, since
        each neuron reads only as many inputs as it has weights.
        """
        for layer_count, layer in enumerate(self.network):
            new_inputs = []
            for neuron in layer:
                activation = self.activate(neuron['weights'], inputs)
                neuron['output'] = self.transfer(activation, layer_count)
                new_inputs.append(neuron['output'])
            inputs = new_inputs
        return inputs

    @staticmethod
    def activate(weights, inputs):
        """
        Linear activation: weighted sum of `inputs`, where the last
        element of `weights` is the bias term.
        """
        activation = weights[-1]
        for i in range(len(weights) - 1):
            activation += weights[i] * inputs[i]
        return activation

    def transfer(self, activation, layer):
        """
        Apply the activation function for the given layer index.  Hidden
        layers use `hidden_function`; the output layer (index equal to
        the number of hidden layers) uses `output_function`.
        """
        function = self.output_function if layer == self.layers else self.hidden_function
        if function == 1:
            return 1.0 / (1.0 + exp(-activation))  # sigmoid
        elif function == 2:
            return tanh(activation)
        return activation                           # linear

    def transfer_derivative(self, output, layer):
        """
        Derivative of the layer's activation function, expressed in
        terms of the neuron's *output* value.
        """
        function = self.output_function if layer == self.layers else self.hidden_function
        if function == 1:
            return output * (1.0 - output)  # sigmoid'(x) = y * (1 - y)
        elif function == 2:
            return 1 - output ** 2          # tanh'(x) = 1 - y^2
        return 1.0                           # linear

    def backward_propagate_error(self, expected):
        """
        Backpropagate the error for one sample, storing each neuron's
        gradient in neuron['delta'].  Must be called right after
        forward_propagate so the neuron['output'] values are current.
        """
        for i in reversed(range(len(self.network))):
            layer = self.network[i]
            errors = list()
            if i != len(self.network) - 1:
                # Hidden layer: delta-weighted sum from the next layer.
                for j in range(len(layer)):
                    error = 0.0
                    for neuron in self.network[i + 1]:
                        error += neuron['weights'][j] * neuron['delta']
                    errors.append(error)
            else:
                # Output layer: plain difference from the expected values.
                for j in range(len(layer)):
                    errors.append(expected[j] - layer[j]['output'])
            for j in range(len(layer)):
                neuron = layer[j]
                neuron['delta'] = errors[j] * self.transfer_derivative(neuron['output'], i)

    def update_weights(self, row, l_rate):
        """
        Apply one gradient-descent step to every weight, using the
        deltas stored by backward_propagate_error.

        Fix: the first layer's inputs are the first `nodes_in` columns
        of the row.  The original `row[:-1]` kept all but the last
        column, which overran the weight vector (IndexError) whenever
        nodes_out > 1; for nodes_out == 1 the behavior is unchanged.
        """
        for i in range(len(self.network)):
            if i == 0:
                inputs = row[:self.nodes_in]
            else:
                inputs = [neuron['output'] for neuron in self.network[i - 1]]
            for neuron in self.network[i]:
                for j in range(len(inputs)):
                    neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
                neuron['weights'][-1] += l_rate * neuron['delta']  # bias

    def reshape_output(self):
        """
        Build label_dict, mapping each distinct class label to an
        output-neuron index (in sorted label order).  Only meaningful
        for classification problems.

        Fix: for classification the label lives in the last column only
        (matching the `row[-1]` lookup in train_network); the original
        scanned the last nodes_out columns and pulled input values into
        the label set whenever there was more than one class.
        """
        self.label_dict = dict()
        labels = set()
        for item in self.train_data:
            if self.classification:
                labels.add(item[-1])
            else:
                for out in item[-self.nodes_out:]:
                    labels.add(out)
        for i, label in enumerate(sorted(labels)):
            self.label_dict[label] = i

    def train_network(self):
        """
        Train the network for up to `self.epoch` epochs, recording the
        mean squared error of each epoch in self.error.

        Fix (early stop): in the original loop `error_count > 5` was
        unreachable (the counter was only incremented inside the branch
        it guarded) and `epoch == n_epoch` is never true inside
        range(n_epoch), so training never stopped early and the summary
        line never printed.  Now training stops once the error stays at
        or below target_error for more than 5 consecutive epochs, or
        when the epoch limit is reached.
        """
        self.reshape_output()
        self.error = []
        train = self.train_data
        n_epoch = self.epoch
        l_rate = self.learn_rate
        n_outputs = self.nodes_out
        error_count = 0  # consecutive epochs at/below the target error
        for epoch in range(n_epoch):
            sum_error = 0
            for row in train:
                outputs = self.forward_propagate(row)
                if self.classification:
                    # One-hot target built from the label in the last column.
                    expected = [0 for _ in range(n_outputs)]
                    expected[self.label_dict[row[-1]]] = 1
                else:
                    expected = row[-n_outputs:]
                sum_error += sum((expected[i] - outputs[i]) ** 2
                                 for i in range(len(expected)))
                self.backward_propagate_error(expected)
                self.update_weights(row, l_rate)
            total_error = sum_error / (n_outputs * len(train))
            self.error.append(total_error)
            error_count = error_count + 1 if total_error <= self.target_error else 0
            if error_count > 5 or epoch == n_epoch - 1:
                print('>epoch=%d, Taxa de Apredizado=%.3f, error=%.3f'
                      % (epoch, l_rate, total_error))
                break

    def predict(self, row):
        """
        Forward-propagate `row` and return the raw output list
        (regression) or the index of the most-activated output neuron
        (classification).
        """
        outputs = self.forward_propagate(row)
        if not self.classification:
            return outputs
        return outputs.index(max(outputs))
############### EXAMPLE: approximate sin(x) with the MLP
# Architecture / control variables.
# Fix: each dataset row is [x, sin(x)] -> exactly 1 input and 1 target.
# The original n_inputs=2 / n_outputs=2 leaked the target column into the
# network's inputs and trained it to reproduce the entire row.
n_inputs = 1            # number of input features
n_outputs = 1           # number of outputs (sin(x))
n_hidden = [10]         # neurons per hidden layer
epochs = 5000           # epoch limit
learn_rate = 0.1        # learning rate
target_error = 0.01     # train until this error or the epoch limit
classification = False  # regression problem, not classification

# Data: 50 random x values and their sines.
x = np.random.uniform(-2.5 * pi, 2.5 * pi, 50)
dataset = [[item, sin(item)] for item in x]

# Model (tanh output so the network can span sin's [-1, 1] range).
RNA = MLP(n_inputs, n_outputs, n_hidden, dataset, learn_rate, epochs,
          target_error, classification, output_function=2)

# Training
RNA.train_network()
Add Comment
Please, Sign In to add comment