# pure_python_neural_network2.py

# Backprop on the Seeds Dataset
from random import seed
from random import randrange
from random import random
from csv import reader
from math import exp

# Load a CSV file
def load_csv(filename):
    dataset = list()
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:
                continue
            dataset.append(row)
    return dataset

# Convert string column to float
def str_column_to_float(dataset, column):
    for row in dataset:
        row[column] = float(row[column])

# Convert string column to integer class labels (and apply the mapping)
def str_column_to_int(dataset, column):
    class_values = [row[column] for row in dataset]
    unique = sorted(set(class_values))  # sorted so the label mapping is deterministic
    lookup = dict()
    for i, value in enumerate(unique):
        lookup[value] = i
    for row in dataset:
        row[column] = lookup[row[column]]
    return lookup

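# For the inline data below the class labels are the strings '0' and '1', so
# the lookup produced by str_column_to_int is simply {'0': 0, '1': 1}.
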
# Find the min and max values for each column
def dataset_minmax(dataset):
    return [[min(column), max(column)] for column in zip(*dataset)]

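# zip(*dataset) transposes the row-oriented dataset into columns, so each
# [min, max] pair lines up with one column of the data.
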
# Rescale dataset columns to the range 0-1
def normalize_dataset(dataset, minmax):
    for row in dataset:
        for i in range(len(row)-1):
            row[i] = (row[i] - minmax[i][0]) / (minmax[i][1] - minmax[i][0])

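# Note: the inner loop stops at len(row)-1, so the final column (the class
# label) is deliberately left unscaled.
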
# Split a dataset into k folds
def cross_validation_split(dataset, n_folds):
    dataset_split = list()
    dataset_copy = list(dataset)
    fold_size = int(len(dataset) / n_folds)
    for i in range(n_folds):
        fold = list()
        while len(fold) < fold_size:
            index = randrange(len(dataset_copy))
            fold.append(dataset_copy.pop(index))
        dataset_split.append(fold)
    return dataset_split

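# Rows are drawn without replacement; if len(dataset) is not evenly divisible
# by n_folds, the leftover rows are simply dropped.
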
# Calculate accuracy percentage (fraction of exact label matches)
def accuracy_metric(actual, predicted):
    correct = 0
    for i in range(len(actual)):
        if actual[i] == predicted[i]:
            correct += 1
    return correct / float(len(actual)) * 100.0

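# Example: accuracy_metric([0, 0, 1, 1], [0, 1, 1, 1]) returns 75.0.
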
# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    folds = cross_validation_split(dataset, n_folds)
    scores = list()
    for fold in folds:
        train_set = list(folds)
        train_set.remove(fold)
        train_set = sum(train_set, [])
        test_set = list()
        for row in fold:
            row_copy = list(row)
            test_set.append(row_copy)
            row_copy[-1] = None
        predicted = algorithm(train_set, test_set, *args)
        actual = [row[-1] for row in fold]
        accuracy = accuracy_metric(actual, predicted)
        scores.append(accuracy)
    return scores

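# Each fold takes one turn as the held-out test set; the true label of every
# test row is blanked out (set to None) so the algorithm cannot peek at it.
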
# Calculate neuron activation for an input
def activate(weights, inputs):
    activation = weights[-1]
    for i in range(len(weights)-1):
        activation += weights[i] * inputs[i]
    return activation

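# The bias is stored as the last element of each neuron's weight list, so
# activation = bias + sum(weights[i] * inputs[i]) over the real inputs.
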
# Transfer neuron activation
def transfer(activation):
    return 1.0 / (1.0 + exp(-activation))

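# Illustrative sanity checks on the sigmoid transfer:
assert abs(transfer(0.0) - 0.5) < 1e-9   # sigmoid(0) is exactly 0.5
assert transfer(10.0) > 0.999            # saturates toward 1 for large inputs
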
# Forward propagate input to a network output
def forward_propagate(network, row):
    inputs = row
    for layer in network:
        new_inputs = []
        for neuron in layer:
            activation = activate(neuron['weights'], inputs)
            neuron['output'] = transfer(activation)
            new_inputs.append(neuron['output'])
        inputs = new_inputs
    return inputs

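# Illustrative example with hypothetical weights: one hidden neuron feeding
# two output neurons; forward_propagate returns one value per output neuron.
#   tiny_net = [[{'weights': [0.1, 0.2, 0.3]}],
#               [{'weights': [0.4, 0.5]}, {'weights': [0.6, 0.7]}]]
#   forward_propagate(tiny_net, [1.0, 0.5])  # -> list of two sigmoid outputs
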
# Calculate the derivative of a neuron output
def transfer_derivative(output):
    return output * (1.0 - output)

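# Because transfer() is the logistic sigmoid, its derivative can be written
# purely in terms of the output o: d(sigma)/da = o * (1 - o), so backprop
# never needs the raw activation value.
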
# Backpropagate error and store in neurons
def backward_propagate_error(network, expected):
    for i in reversed(range(len(network))):
        layer = network[i]
        errors = list()
        if i != len(network)-1:
            for j in range(len(layer)):
                error = 0.0
                for neuron in network[i + 1]:
                    error += (neuron['weights'][j] * neuron['delta'])
                errors.append(error)
        else:
            for j in range(len(layer)):
                neuron = layer[j]
                errors.append(expected[j] - neuron['output'])
        for j in range(len(layer)):
            neuron = layer[j]
            neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])

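# Output-layer error is (expected - output); hidden-layer error is the sum of
# downstream deltas weighted by the connecting weights. Each error is then
# scaled by the transfer derivative to produce that neuron's delta.
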
# Update network weights with error
def update_weights(network, row, l_rate):
    for i in range(len(network)):
        inputs = row[:-1]
        if i != 0:
            inputs = [neuron['output'] for neuron in network[i - 1]]
        for neuron in network[i]:
            for j in range(len(inputs)):
                neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
            neuron['weights'][-1] += l_rate * neuron['delta']

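# Plain stochastic gradient descent: each weight moves by
# l_rate * delta * input, and the bias (the last weight) by l_rate * delta.
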
# Train a network for a fixed number of epochs
def train_network(network, train, l_rate, n_epoch, n_outputs):
    for epoch in range(n_epoch):
        for row in train:
            outputs = forward_propagate(network, row)
            expected = [0 for i in range(n_outputs)]
            expected[int(row[-1])] = 1
            backward_propagate_error(network, expected)
            update_weights(network, row, l_rate)

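# expected is a one-hot vector: zeros everywhere except a 1 at the index of
# the row's class label, matching one output neuron per class.
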
# Initialize a network
def initialize_network(n_inputs, n_hidden, n_outputs):
    network = list()
    hidden_layer = [{'weights':[random() for i in range(n_inputs + 1)]} for i in range(n_hidden)]
    network.append(hidden_layer)
    output_layer = [{'weights':[random() for i in range(n_hidden + 1)]} for i in range(n_outputs)]
    network.append(output_layer)
    return network

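# A network is a list of layers, a layer is a list of neuron dicts, and each
# neuron holds n_inputs + 1 random weights (the extra one is the bias).
# e.g. initialize_network(2, 1, 2) gives one hidden neuron with 3 weights and
# two output neurons with 2 weights each.
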
# Make a prediction with a network (original returned max(outputs), i.e. the
# raw probability, rather than the arg max class index)
def predict(network, row):
    outputs = forward_propagate(network, row)
    return outputs.index(max(outputs))

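# The arg max over the output layer doubles as the predicted class label,
# since the expected vectors are one-hot indexed by class.
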
# Backpropagation Algorithm With Stochastic Gradient Descent
def back_propagation(train, test, l_rate, n_epoch, n_hidden):
    n_inputs = len(train[0]) - 1
    n_outputs = len(set([row[-1] for row in train]))
    network = initialize_network(n_inputs, n_hidden, n_outputs)
    train_network(network, train, l_rate, n_epoch, n_outputs)
    predictions = list()
    for row in test:
        prediction = predict(network, row)
        predictions.append(prediction)
    return predictions

# Test Backprop (on the inline toy dataset by default; see USE_CSV below)
seed(1)

# load and prepare data; set USE_CSV = True to load the Seeds CSV instead of
# the inline two-feature toy dataset
USE_CSV = False
if USE_CSV:
    filename = 'seeds_dataset.csv'
    dataset = load_csv(filename)
else:
    data = '''
    2.7810836  2.550537003  0
    1.465489372  2.362125076  0
    3.396561688  4.400293529  0
    1.38807019  1.850220317  0
    3.06407232  3.005305973  0
    7.627531214  2.759262235  1
    5.332441248  2.088626775  1
    6.922596716  1.77106367  1
    8.675418651  -0.242068655 1
    7.673756466  3.508563011  1
    '''.strip().splitlines()
    dataset = [line.split() for line in data]
for i in range(len(dataset[0])-1):
    str_column_to_float(dataset, i)
# convert class column to integer labels (needed so accuracy_metric can
# compare them with the integer predictions)
str_column_to_int(dataset, len(dataset[0])-1)
# normalize input variables
minmax = dataset_minmax(dataset)
normalize_dataset(dataset, minmax)
# evaluate algorithm
n_folds = 5
l_rate = 0.3
n_epoch = 500
n_hidden = 5
scores = evaluate_algorithm(dataset, back_propagation, n_folds, l_rate, n_epoch, n_hidden)
print('Scores: %s' % scores)
print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))
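
# A minimal follow-up sketch (not part of the original evaluation): fit one
# network on the full normalized dataset and re-predict its own rows, just to
# show predict() used directly.
n_classes = len(set(row[-1] for row in dataset))
final_net = initialize_network(len(dataset[0]) - 1, n_hidden, n_classes)
train_network(final_net, dataset, l_rate, n_epoch, n_classes)
print('Resubstitution predictions: %s' % [predict(final_net, row) for row in dataset])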