Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #import stuff
- # package for math operations
- import numpy
- # package for sigmoid function
- import scipy.special
- # package for graphics
- import matplotlib.pyplot
- get_ipython().magic('matplotlib inline')
- # create the network
class neuralNetwork:
    """A three-layer (input, hidden, output) feed-forward neural network.

    Uses a sigmoid activation and plain backpropagation with a fixed
    learning rate. Weights are held in two matrices:
    ``wih`` (input -> hidden) and ``who`` (hidden -> output).
    """

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        """Store the layer sizes and learning rate, and initialise weights.

        Weights are sampled from a normal distribution with standard
        deviation ``hnodes ** -0.5`` (1/sqrt of the hidden-layer size).
        NOTE(review): the reference implementation scales ``who`` by
        ``onodes ** -0.5`` instead — behaviour kept as-is; confirm intent.
        """
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.lr = learningrate
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5),
                                       (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5),
                                       (self.onodes, self.hnodes))
        # Sigmoid activation; a direct reference instead of a lambda
        # assigned to a name (PEP 8 E731).
        self.activation_function = scipy.special.expit

    def train(self, inputs_list, targets_list):
        """Run one forward pass and one backpropagation weight update.

        ``inputs_list`` and ``targets_list`` are 1-D sequences; both are
        reshaped into column vectors internally.
        """
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # Forward pass: input -> hidden -> output.
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # Output error, then hidden error back-propagated through the
        # CURRENT who — must be computed before who is updated below.
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Gradient-descent updates; f*(1-f) is the sigmoid derivative.
        self.who += self.lr * numpy.dot(
            output_errors * final_outputs * (1.0 - final_outputs),
            numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot(
            hidden_errors * hidden_outputs * (1.0 - hidden_outputs),
            numpy.transpose(inputs))

    def query(self, inputs_list):
        """Forward-propagate ``inputs_list`` and return the output column
        vector of shape ``(onodes, 1)``."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return final_outputs
# train network
# Network geometry: 28x28-pixel MNIST images flattened to 784 inputs,
# one output node per digit class 0-9.
input_nodes = 784
hidden_nodes = 100  # renamed from the misleading "hidden_inputs"
output_nodes = 10
learning_rate = 0.2

# create instance of neural network
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

# Load the MNIST training set (CSV rows: label, then 784 pixel values).
with open("mnist_train.csv", "r") as training_data_file:
    training_data_list = training_data_file.readlines()

epochs = 2
for e in range(epochs):
    for record in training_data_list:
        all_values = record.split(',')
        # Scale pixels from 0-255 into 0.01-1.00 so no input is exactly 0.
        # (numpy.asfarray was deprecated/removed in NumPy 2.0.)
        inputs = (numpy.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
        # Target vector: 0.01 everywhere, 0.99 at the correct label.
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
# test neural network
# Score each MNIST test record: 1 if the network's strongest output node
# matches the labelled digit, else 0.
scorecard = []

# Load the test set (same CSV layout as the training data).
with open("mnist_test.csv", "r") as test_data_file:
    test_data_list = test_data_file.readlines()

for record in test_data_list:
    all_values = record.split(',')
    # First CSV field is the true digit for this record.
    correct_label = int(all_values[0])
    # Same 0.01-1.00 pixel scaling as used during training.
    # (numpy.asfarray was deprecated/removed in NumPy 2.0.)
    inputs = (numpy.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
    outputs = n.query(inputs)
    # The network's answer is the index of the largest output value.
    label = numpy.argmax(outputs)
    scorecard.append(1 if label == correct_label else 0)

# calculate performance of network: fraction classified correctly
scorecard_array = numpy.asarray(scorecard)
print("Performance = ", scorecard_array.sum() / scorecard_array.size)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement