from numpy import exp, array, random, dot


class NeuralNetwork():
    def __init__(self):
        # Seed the random number generator, so it generates the same numbers
        # every time the program runs.
        random.seed(1)

        # We model a single neuron, with 3 input connections and 1 output connection.
        # We assign random weights to a 3 x 1 matrix, with values in the range -1 to 1
        # and mean 0.
        self.synaptic_weights = 2 * random.random((3, 1)) - 1

    # The Sigmoid function, which describes an S-shaped curve.
    # We pass the weighted sum of the inputs through this function to
    # normalise them between 0 and 1.
    def __sigmoid(self, x):
        return 1 / (1 + exp(-x))

    # The derivative of the Sigmoid function.
    # This is the gradient of the Sigmoid curve.
    # It indicates how confident we are about the existing weight.
    def __sigmoid_derivative(self, x):
        return x * (1 - x)

    # We train the neural network through a process of trial and error,
    # adjusting the synaptic weights each time.
    def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
        for iteration in range(number_of_training_iterations):
            # Pass the training set through our neural network (a single neuron).
            output = self.think(training_set_inputs)

            # Calculate the error (the difference between the desired output
            # and the predicted output).
            error = training_set_outputs - output

            # Multiply the error by the input and again by the gradient of the Sigmoid curve.
            # This means less confident weights are adjusted more.
            # This also means inputs that are zero do not cause changes to the weights.
            adjustment = dot(training_set_inputs.T, error * self.__sigmoid_derivative(output))

            # Adjust the weights.
            self.synaptic_weights += adjustment

    # The neural network thinks.
    def think(self, inputs):
        # Pass inputs through our neural network (our single neuron).
        return self.__sigmoid(dot(inputs, self.synaptic_weights))
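
# Note on the update rule in train(): the adjustment is one full-batch
# gradient-descent step (with an implicit learning rate of 1) on the
# squared-error loss L(w) = 0.5 * sum((y - sigmoid(X . w)) ** 2),
# whose gradient with respect to w is
#   -X.T . ((y - output) * output * (1 - output)),
# i.e. exactly the negative of `adjustment` above.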

if __name__ == "__main__":
    neural_network = NeuralNetwork()

    print('Random starting synaptic weights: ')
    print(neural_network.synaptic_weights)

    # The training set: four examples, each with 3 inputs and 1 output.
    training_set_inputs = array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
    training_set_outputs = array([[0, 1, 1, 0]]).T

    # Train the network 10,000 times, adjusting the weights each iteration.
    neural_network.train(training_set_inputs, training_set_outputs, 10000)

    print("New synaptic weights after training: ")
    print(neural_network.synaptic_weights)

    print("Considering new situation [1, 0, 0] -> ?: ")
    print(neural_network.think(array([1, 0, 0])))
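
A quick aside on the derivative shortcut used above: since `output` is already the Sigmoid of the weighted sum, `__sigmoid_derivative` can return `output * (1 - output)` instead of re-evaluating the exponential, via the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)). A minimal standalone sketch checking that identity against a central finite difference (the test points and step size `h` are illustrative choices, not part of the original paste):

from numpy import exp, allclose, linspace

def sigmoid(x):
    return 1 / (1 + exp(-x))

# Compare the analytic gradient with a central-difference estimate
# at a handful of points; h is an arbitrary small step.
h = 1e-6
xs = linspace(-5, 5, 11)
analytic = sigmoid(xs) * (1 - sigmoid(xs))
numeric = (sigmoid(xs + h) - sigmoid(xs - h)) / (2 * h)
assert allclose(analytic, numeric, atol=1e-6)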
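
Because `think()` accepts a 2-D array and broadcasts over rows, the trained network can score several situations in one call. A small usage sketch that could be appended to the `__main__` block after training (the candidate rows are illustrative picks; given that the training outputs simply copy each row's first element, the predictions should land near 1, 0 and 1 respectively):

# Query several candidate situations at once; each row is one input vector.
candidates = array([[1, 0, 0],
                    [0, 0, 1],
                    [1, 1, 0]])
print(neural_network.think(candidates))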