SHOW:
|
|
- or go back to the newest paste.
def train(self, training_set_inputs, training_set_outputs):
    """Train the network by running global steps until it converges.

    Runs at most 100 global training steps, stopping early as soon as
    the layer-2 error reported by ``train_global_step`` falls below 0.1.
    """
    max_steps = 100
    for _ in range(max_steps):
        self.train_global_step(training_set_inputs, training_set_outputs)
        # Early exit once the network's layer-2 error is small enough.
        if self.layer2_error < 0.1:
            break
def train_global_step(self, training_set_inputs, training_set_outputs):
    """Run one global training step over the full training set.

    Repeats a full-batch forward/backward pass once per training example
    and applies gradient-style weight updates to both layers.

    Side effects:
        - Updates ``self.layer1.synaptic_weights`` and
          ``self.layer2.synaptic_weights`` in place.
        - Sets ``self.learning_rate`` to 0.1.
        - Sets ``self.layer2_error`` to the mean absolute layer-2 error of
          the most recent pass, so callers (e.g. ``train``) can use it as a
          stopping criterion.

    Bug fix: previously ``self.layer2_error`` was initialized to 0 and never
    reassigned (only the local ``layer2_error`` array was computed), so
    ``train``'s ``< 0.1`` early-stop fired immediately after the first step.
    """
    self.layer2_error = 0
    # NOTE(review): unconditionally resets any externally configured
    # learning rate — confirm this clobber is intended.
    self.learning_rate = 0.1

    # One full-batch update per training example; the entire training set
    # is re-passed through the network on every iteration.
    for _ in range(len(training_set_inputs)):
        # Pass the training set through our neural network.
        output_from_layer_1, output_from_layer_2 = self.think(training_set_inputs)

        # Error for layer 2: the difference between the desired output
        # and the predicted output.
        layer2_error = training_set_outputs - output_from_layer_2
        layer2_delta = layer2_error * self.__sigmoid_derivative(output_from_layer_2)

        # Publish a scalar summary of the batch error so the caller's
        # convergence check actually reflects training progress.
        # (abs()/.mean() are ndarray operations — no new imports needed.)
        self.layer2_error = float(abs(layer2_error).mean())

        # Error for layer 1: by looking at the weights in layer 2, determine
        # how much layer 1 contributed to the error in layer 2.
        layer1_error = layer2_delta.dot(self.layer2.synaptic_weights.T)
        layer1_delta = layer1_error * self.__sigmoid_derivative(output_from_layer_1)

        # Calculate how much to adjust the weights by.
        layer1_adjustment = training_set_inputs.T.dot(layer1_delta)
        layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)

        # Adjust the weights.
        self.layer1.synaptic_weights += self.learning_rate * layer1_adjustment
        self.layer2.synaptic_weights += self.learning_rate * layer2_adjustment