Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- from random import seed
- from math import exp
- from random import random
# Network construction.
# NOTE: the original assignment suggests fixing all weights to 0.5 on
# code.finki.ukim.mk if random() causes problems there.
def initialize_network(n_inputs, n_hidden, n_outputs):
    """Build a two-layer (hidden + output) network with random weights.

    :param n_inputs: number of neurons in the input layer
    :type n_inputs: int
    :param n_hidden: number of neurons in the hidden layer
    :type n_hidden: int
    :param n_outputs: number of neurons in the output layer (number of classes)
    :type n_outputs: int
    :return: the network as a list of layers; each layer is a list of
        neurons, each neuron a dict with key 'weights'
    :rtype: list(list(dict(str, list)))
    """
    def _make_layer(n_neurons, n_incoming):
        # every neuron stores one weight per incoming connection plus a
        # trailing bias weight (hence the +1)
        return [{'weights': [random() for _ in range(n_incoming + 1)]}
                for _ in range(n_neurons)]

    # hidden layer is built first so the random() draw order matches
    # a sequential construction
    return [_make_layer(n_hidden, n_inputs), _make_layer(n_outputs, n_hidden)]
def neuron_calculate(weights, inputs):
    """Compute a neuron's pre-activation value (weighted sum plus bias).

    The last element of `weights` is the bias term; the remaining
    weights are paired positionally with `inputs`.

    :param weights: vector (list) of weights, bias last
    :type weights: list(float)
    :param inputs: vector (list) of input values
    :type inputs: list(float)
    :return: the activation value
    :rtype: float
    """
    activation = weights[-1]
    for weight, value in zip(weights[:-1], inputs):
        activation += weight * value
    return activation
def sigmoid_activation(activation):
    """Logistic sigmoid transfer function: 1 / (1 + e^(-x)).

    :param activation: pre-activation value
    :type activation: float
    :return: sigmoid of the input, in the open interval (0, 1)
    :rtype: float
    """
    denominator = 1.0 + exp(-activation)
    return 1.0 / denominator
def forward_propagate(network, row):
    """Propagate one data instance forward through the whole network.

    Each neuron's computed value is cached under its 'output' key so
    that backpropagation can reuse it later.

    :param network: the network (list of layers of neuron dicts)
    :param row: current data instance
    :return: list of outputs produced by the last layer
    """
    layer_inputs = row
    for layer in network:
        layer_outputs = []
        for neuron in layer:
            value = sigmoid_activation(neuron_calculate(neuron['weights'], layer_inputs))
            neuron['output'] = value  # cached for the backward pass
            layer_outputs.append(value)
        # this layer's outputs feed the next layer
        layer_inputs = layer_outputs
    return layer_inputs
def sigmoid_activation_derivative(output):
    """Sigmoid derivative expressed through the neuron's output.

    For f(x) = sigmoid(x), f'(x) = f(x) * (1 - f(x)), so only the
    already-computed output value is needed.

    :param output: the neuron's output value (a sigmoid result)
    :return: value of the derivative at that point
    """
    return (1.0 - output) * output
def backward_propagate_error(network, expected):
    """Backpropagate the error, storing a 'delta' on every neuron.

    :param network: the network (list of layers of neuron dicts);
        neurons must already carry an 'output' from forward propagation
    :type network: list(list(dict(str, list)))
    :param expected: expected (one-hot) output values
    :type expected: list(int)
    :return: None
    """
    last = len(network) - 1
    for i in reversed(range(len(network))):
        layer = network[i]
        if i == last:
            # output layer: error is the distance to the target value
            errors = [expected[j] - neuron['output']
                      for j, neuron in enumerate(layer)]
        else:
            # hidden layer: error is the delta-weighted sum of the
            # connections into the next (already processed) layer
            errors = [sum(nxt['weights'][j] * nxt['delta']
                          for nxt in network[i + 1])
                      for j in range(len(layer))]
        for neuron, err in zip(layer, errors):
            neuron['delta'] = err * sigmoid_activation_derivative(neuron['output'])
def update_weights(network, row, l_rate):
    """Apply one gradient step to every weight using the stored deltas.

    :param network: the network; neurons must carry 'delta' (and, for
        non-first layers, 'output') values
    :type network: list(list(dict(str, list)))
    :param row: one data instance (class label in the last column)
    :type row: list
    :param l_rate: learning rate
    :type l_rate: float
    :return: None
    """
    for i, layer in enumerate(network):
        if i == 0:
            inputs = row[:-1]  # first layer sees the raw attributes (label dropped)
        else:
            inputs = [prev['output'] for prev in network[i - 1]]
        for neuron in layer:
            step = l_rate * neuron['delta']
            for j, value in enumerate(inputs):
                neuron['weights'][j] += step * value
            # bias weight has a constant input of 1
            neuron['weights'][-1] += step
def train_network(network, train, l_rate, n_epoch, n_outputs, verbose=True):
    """Train a network with stochastic gradient descent for a fixed number of epochs.

    :param network: the network
    :type network: list(list(dict(str, list)))
    :param train: training dataset (class label in the last column of each row)
    :type train: list
    :param l_rate: learning rate
    :type l_rate: float
    :param n_epoch: number of epochs
    :type n_epoch: int
    :param n_outputs: number of neurons (classes) in the output layer
    :type n_outputs: int
    :param verbose: True to print a per-epoch log line
    :type verbose: bool
    :return: None
    """
    for epoch in range(n_epoch):
        sum_error = 0
        for row in train:
            outputs = forward_propagate(network, row)
            # one-hot encode the integer class label in the last column
            expected = [0] * n_outputs
            expected[row[-1]] = 1
            sum_error += sum((target - out) ** 2
                             for target, out in zip(expected, outputs))
            backward_propagate_error(network, expected)
            update_weights(network, row, l_rate)
        if verbose:
            print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, sum_error))
def predict(network, row):
    """Predict the class of one instance with a trained network.

    :param network: the network
    :type network: list(list(dict(str, list)))
    :param row: one data instance
    :type row: list
    :return: index of the output neuron with the highest value
        (the predicted class)
    """
    outputs = forward_propagate(network, row)
    # argmax over the output layer; ties resolve to the first maximum,
    # matching outputs.index(max(outputs))
    best_index, best_value = 0, outputs[0]
    for idx, value in enumerate(outputs):
        if value > best_value:
            best_index, best_value = idx, value
    return best_index
# Labelled dataset: each row holds four float attributes followed by an
# integer class label (0, 1 or 2) in the last column. The values look
# like Iris flower measurements with remapped class indices —
# TODO(review): confirm the attribute/label meaning against the
# original assignment statement.
dataset = [
    [6.3, 2.9, 5.6, 1.8, 0],
    [6.5, 3.0, 5.8, 2.2, 0],
    [7.6, 3.0, 6.6, 2.1, 0],
    [4.9, 2.5, 4.5, 1.7, 0],
    [7.3, 2.9, 6.3, 1.8, 0],
    [6.7, 2.5, 5.8, 1.8, 0],
    [7.2, 3.6, 6.1, 2.5, 0],
    [6.5, 3.2, 5.1, 2.0, 0],
    [6.4, 2.7, 5.3, 1.9, 0],
    [6.8, 3.0, 5.5, 2.1, 0],
    [5.7, 2.5, 5.0, 2.0, 0],
    [5.8, 2.8, 5.1, 2.4, 0],
    [6.4, 3.2, 5.3, 2.3, 0],
    [6.5, 3.0, 5.5, 1.8, 0],
    [7.7, 3.8, 6.7, 2.2, 0],
    [7.7, 2.6, 6.9, 2.3, 0],
    [6.0, 2.2, 5.0, 1.5, 0],
    [6.9, 3.2, 5.7, 2.3, 0],
    [5.6, 2.8, 4.9, 2.0, 0],
    [7.7, 2.8, 6.7, 2.0, 0],
    [6.3, 2.7, 4.9, 1.8, 0],
    [6.7, 3.3, 5.7, 2.1, 0],
    [7.2, 3.2, 6.0, 1.8, 0],
    [6.2, 2.8, 4.8, 1.8, 0],
    [6.1, 3.0, 4.9, 1.8, 0],
    [6.4, 2.8, 5.6, 2.1, 0],
    [7.2, 3.0, 5.8, 1.6, 0],
    [7.4, 2.8, 6.1, 1.9, 0],
    [7.9, 3.8, 6.4, 2.0, 0],
    [6.4, 2.8, 5.6, 2.2, 0],
    [6.3, 2.8, 5.1, 1.5, 0],
    [6.1, 2.6, 5.6, 1.4, 0],
    [7.7, 3.0, 6.1, 2.3, 0],
    [6.3, 3.4, 5.6, 2.4, 0],
    [5.1, 3.5, 1.4, 0.2, 1],
    [4.9, 3.0, 1.4, 0.2, 1],
    [4.7, 3.2, 1.3, 0.2, 1],
    [4.6, 3.1, 1.5, 0.2, 1],
    [5.0, 3.6, 1.4, 0.2, 1],
    [5.4, 3.9, 1.7, 0.4, 1],
    [4.6, 3.4, 1.4, 0.3, 1],
    [5.0, 3.4, 1.5, 0.2, 1],
    [4.4, 2.9, 1.4, 0.2, 1],
    [4.9, 3.1, 1.5, 0.1, 1],
    [5.4, 3.7, 1.5, 0.2, 1],
    [4.8, 3.4, 1.6, 0.2, 1],
    [4.8, 3.0, 1.4, 0.1, 1],
    [4.3, 3.0, 1.1, 0.1, 1],
    [5.8, 4.0, 1.2, 0.2, 1],
    [5.7, 4.4, 1.5, 0.4, 1],
    [5.4, 3.9, 1.3, 0.4, 1],
    [5.1, 3.5, 1.4, 0.3, 1],
    [5.7, 3.8, 1.7, 0.3, 1],
    [5.1, 3.8, 1.5, 0.3, 1],
    [5.4, 3.4, 1.7, 0.2, 1],
    [5.1, 3.7, 1.5, 0.4, 1],
    [4.6, 3.6, 1.0, 0.2, 1],
    [5.1, 3.3, 1.7, 0.5, 1],
    [4.8, 3.4, 1.9, 0.2, 1],
    [5.0, 3.0, 1.6, 0.2, 1],
    [5.0, 3.4, 1.6, 0.4, 1],
    [5.2, 3.5, 1.5, 0.2, 1],
    [5.2, 3.4, 1.4, 0.2, 1],
    [5.5, 2.3, 4.0, 1.3, 2],
    [6.5, 2.8, 4.6, 1.5, 2],
    [5.7, 2.8, 4.5, 1.3, 2],
    [6.3, 3.3, 4.7, 1.6, 2],
    [4.9, 2.4, 3.3, 1.0, 2],
    [6.6, 2.9, 4.6, 1.3, 2],
    [5.2, 2.7, 3.9, 1.4, 2],
    [5.0, 2.0, 3.5, 1.0, 2],
    [5.9, 3.0, 4.2, 1.5, 2],
    [6.0, 2.2, 4.0, 1.0, 2],
    [6.1, 2.9, 4.7, 1.4, 2],
    [5.6, 2.9, 3.6, 1.3, 2],
    [6.7, 3.1, 4.4, 1.4, 2],
    [5.6, 3.0, 4.5, 1.5, 2],
    [5.8, 2.7, 4.1, 1.0, 2],
    [6.2, 2.2, 4.5, 1.5, 2],
    [5.6, 2.5, 3.9, 1.1, 2],
    [5.9, 3.2, 4.8, 1.8, 2],
    [6.1, 2.8, 4.0, 1.3, 2],
    [6.3, 2.5, 4.9, 1.5, 2],
    [6.1, 2.8, 4.7, 1.2, 2],
    [6.4, 2.9, 4.3, 1.3, 2],
    [6.6, 3.0, 4.4, 1.4, 2],
    [6.8, 2.8, 4.8, 1.4, 2],
    [6.7, 3.0, 5.0, 1.7, 2],
    [6.0, 2.9, 4.5, 1.5, 2],
    [5.7, 2.6, 3.5, 1.0, 2],
    [5.5, 2.4, 3.8, 1.1, 2],
    [5.4, 3.0, 4.5, 1.5, 2],
    [6.0, 3.4, 4.5, 1.6, 2],
    [6.7, 3.1, 4.7, 1.5, 2],
    [6.3, 2.3, 4.4, 1.3, 2],
    [5.6, 3.0, 4.1, 1.3, 2],
    [5.5, 2.5, 4.0, 1.3, 2],
    [5.5, 2.6, 4.4, 1.2, 2],
    [6.1, 3.0, 4.6, 1.4, 2],
    [5.8, 2.6, 4.0, 1.2, 2],
    [5.0, 2.3, 3.3, 1.0, 2],
    [5.6, 2.7, 4.2, 1.3, 2],
    [5.7, 3.0, 4.2, 1.2, 2],
    [5.7, 2.9, 4.2, 1.3, 2],
    [6.2, 2.9, 4.3, 1.3, 2],
    [5.1, 2.5, 3.0, 1.1, 2],
    [5.7, 2.8, 4.1, 1.3, 2],
    [6.4, 3.1, 5.5, 1.8, 0],
    [6.0, 3.0, 4.8, 1.8, 0],
    [6.9, 3.1, 5.4, 2.1, 0],
    [6.8, 3.2, 5.9, 2.3, 0],
    [6.7, 3.3, 5.7, 2.5, 0],
    [6.7, 3.0, 5.2, 2.3, 0],
    [6.3, 2.5, 5.0, 1.9, 0],
    [6.5, 3.0, 5.2, 2.0, 0],
    [6.2, 3.4, 5.4, 2.3, 0],
    [4.7, 3.2, 1.6, 0.2, 1],
    [4.8, 3.1, 1.6, 0.2, 1],
    [5.4, 3.4, 1.5, 0.4, 1],
    [5.2, 4.1, 1.5, 0.1, 1],
    [5.5, 4.2, 1.4, 0.2, 1],
    [4.9, 3.1, 1.5, 0.2, 1],
    [5.0, 3.2, 1.2, 0.2, 1],
    [5.5, 3.5, 1.3, 0.2, 1],
    [4.9, 3.6, 1.4, 0.1, 1],
    [4.4, 3.0, 1.3, 0.2, 1],
    [5.1, 3.4, 1.5, 0.2, 1],
    [5.0, 3.5, 1.3, 0.3, 1],
    [4.5, 2.3, 1.3, 0.3, 1],
    [4.4, 3.2, 1.3, 0.2, 1],
    [5.0, 3.5, 1.6, 0.6, 1],
    [5.9, 3.0, 5.1, 1.8, 0],
    [5.1, 3.8, 1.9, 0.4, 1],
    [4.8, 3.0, 1.4, 0.3, 1],
    [5.1, 3.8, 1.6, 0.2, 1],
    [5.5, 2.4, 3.7, 1.0, 2],
    [5.8, 2.7, 3.9, 1.2, 2],
    [6.0, 2.7, 5.1, 1.6, 2],
    [6.7, 3.1, 5.6, 2.4, 0],
    [6.9, 3.1, 5.1, 2.3, 0],
    [5.8, 2.7, 5.1, 1.9, 0],
]
- if __name__ == "__main__":
- # ne menuvaj
- seed(1)
- att1 = float(input())
- att2 = float(input())
- att3 = float(input())
- att4 = float(input())
- planttype = int(input())
- testCase = [att1, att2, att3, att4, planttype]
- # vasiot kod ovde
- lr = [0.3,0.5,0.7]
- train_set = dataset[0:-10]
- validation_set = dataset[-10:]
- size_inputs = len(dataset[0]) - 1
- size_outputs = len(set([row[-1] for row in dataset]))
- best = 0
- for learning_rate in lr:
- network = initialize_network(size_inputs, 3, size_outputs)
- train_network(network, train_set, learning_rate, 20, size_outputs,False)
- score = 0
- for i in validation_set:
- prediction = predict(network, i)
- if prediction == i[-1]:
- score = score + 1
- if score >= best:
- best = score
- best_network = network
- print(predict(best_network, testCase))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement