Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
def train(self, training_set_inputs, training_set_outputs, max_epochs=100, tolerance=0.1):
    """Train the network until the layer-2 error drops below *tolerance*.

    Repeatedly runs ``train_global_step`` (which performs the actual
    gradient-descent updates and records ``self.layer2_error``) and stops
    early once the recorded error falls under the threshold.

    Args:
        training_set_inputs: Input matrix for the whole training set.
        training_set_outputs: Desired output matrix for the training set.
        max_epochs: Upper bound on global steps (default 100, matching the
            previously hard-coded constant).
        tolerance: Convergence threshold on ``self.layer2_error``
            (default 0.1, matching the previously hard-coded constant).
    """
    for _ in range(max_epochs):
        self.train_global_step(training_set_inputs, training_set_outputs)
        # Early exit once the error summary recorded by the step is small
        # enough. NOTE(review): this relies on train_global_step actually
        # updating self.layer2_error — confirm it does.
        if self.layer2_error < tolerance:
            break
def train_global_step(self, training_set_inputs, training_set_outputs):
    """Perform one "global step": a batch of full-dataset gradient updates.

    Runs ``len(training_set_inputs)`` full-batch forward/backward passes,
    adjusting both layers' weights each time, and records a scalar summary
    of the most recent layer-2 error in ``self.layer2_error`` so that
    ``train`` can test for convergence.

    Args:
        training_set_inputs: Input matrix (rows are training examples);
            assumed to be a NumPy array — it uses ``.T`` and ``.dot``.
        training_set_outputs: Desired outputs for the same examples.

    Side effects:
        Mutates ``self.layer1.synaptic_weights`` and
        ``self.layer2.synaptic_weights``; sets ``self.layer2_error`` and
        ``self.learning_rate``.
    """
    self.layer2_error = 0
    self.learning_rate = 0.1
    # The loop index is unused: each iteration is an identical full-batch
    # update, so this just performs len(training_set_inputs) descent steps.
    for _ in range(len(training_set_inputs)):
        # Forward pass: propagate the training set through both layers.
        output_from_layer_1, output_from_layer_2 = self.think(training_set_inputs)

        # Layer-2 error: difference between the desired and predicted output.
        layer2_error = training_set_outputs - output_from_layer_2
        layer2_delta = layer2_error * self.__sigmoid_derivative(output_from_layer_2)

        # Layer-1 error: how much layer 1 contributed to layer 2's error,
        # found by propagating layer-2 deltas back through layer 2's weights.
        layer1_error = layer2_delta.dot(self.layer2.synaptic_weights.T)
        layer1_delta = layer1_error * self.__sigmoid_derivative(output_from_layer_1)

        # Gradients of the error with respect to each layer's weights.
        layer1_adjustment = training_set_inputs.T.dot(layer1_delta)
        layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)

        # Gradient-descent update.
        self.layer1.synaptic_weights += self.learning_rate * layer1_adjustment
        self.layer2.synaptic_weights += self.learning_rate * layer2_adjustment

        # BUG FIX: the original computed only the *local* layer2_error and
        # never wrote it back, so self.layer2_error stayed 0 and the caller's
        # convergence check (self.layer2_error < 0.1) fired immediately,
        # truncating training after one global step. Record a scalar summary
        # (mean absolute error) of the current batch error instead.
        self.layer2_error = float(abs(layer2_error).mean())
Advertisement
Add Comment
Please sign in to add a comment
Advertisement