import numpy as np


class NeuralNetwork(object):
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Initialize weights
        self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
                                                        (self.input_nodes, self.hidden_nodes))
        self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
                                                         (self.hidden_nodes, self.output_nodes))
        self.lr = learning_rate

        # Sigmoid activation function, defined as a lambda expression
        self.activation_function = lambda x: 1 / (1 + np.exp(-x))

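    # Note on the initialization above: each weight matrix is drawn from a
    # zero-mean normal with standard deviation n**-0.5, where n is the number
    # of incoming nodes, which keeps the initial signals into each layer at a
    # comparable scale. The sigmoid lambda squashes any input into (0, 1),
    # e.g. activation_function(0) == 0.5.
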
    def train(self, features, targets):
        ''' Train the network on a batch of features and targets.

            Arguments
            ---------
            features: 2D array, each row is one data record, each column is a feature
            targets: 1D array of target values

        '''
        n_records = features.shape[0]
        delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
        delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
        for X, y in zip(features, targets):
            # Forward pass for a single record
            final_outputs, hidden_outputs = self.forward_pass_train(X)
            # Backpropagation: accumulate the weight steps for this record
            delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y,
                                                                        delta_weights_i_h, delta_weights_h_o)
        self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)

    def forward_pass_train(self, X):
        ''' Run the forward pass for a single training record

            Arguments
            ---------
            X: features batch

        '''
        #### Forward pass ####
        # Hidden layer
        hidden_inputs = np.dot(X, self.weights_input_to_hidden)   # signals into hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)  # signals from hidden layer

        # Output layer: identity activation, so the output equals its input signal
        final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)  # signals into final output layer
        final_outputs = final_inputs                                          # signals from final output layer

        return final_outputs, hidden_outputs

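    # Shape note for a single record: X is (input_nodes,), weights_input_to_hidden
    # is (input_nodes, hidden_nodes), so hidden_outputs is (hidden_nodes,);
    # weights_hidden_to_output is (hidden_nodes, output_nodes), so final_outputs
    # is (output_nodes,).
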
    def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):
        ''' Implement backpropagation

            Arguments
            ---------
            final_outputs: output from forward pass
            y: target (i.e. label) batch
            delta_weights_i_h: change in weights from input to hidden layers
            delta_weights_h_o: change in weights from hidden to output layers

        '''
        #### Backward pass ####

        # Output error: difference between the desired target and the actual output
        error = y - final_outputs

        # Error term for the output node; the output activation is the identity,
        # so its derivative is 1 and the error term equals the error itself
        output_error_term = error

        # Hidden layer's contribution to the error, propagated back through the
        # hidden-to-output weights
        hidden_error = np.dot(output_error_term, self.weights_hidden_to_output.T)

        # Error term for the hidden layer: hidden error scaled by the derivative
        # of the sigmoid, sigmoid(h) * (1 - sigmoid(h))
        hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)

        # Weight step (input to hidden)
        delta_weights_i_h += hidden_error_term * X[:, None]
        # Weight step (hidden to output)
        delta_weights_h_o += output_error_term * hidden_outputs[:, None]

        return delta_weights_i_h, delta_weights_h_o

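    # Note on the error terms above, assuming a squared-error loss
    # E = (y - y_hat)**2 / 2 (the loss itself is not shown in this paste):
    # with an identity output node, the accumulated steps
    #     delta_weights_h_o += output_error_term * hidden_outputs[:, None]
    #     delta_weights_i_h += hidden_error_term * X[:, None]
    # are the negative gradients of E summed over the batch, which is why
    # update_weights below adds them (scaled by lr / n_records) rather than
    # subtracting.
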
    def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):
        ''' Update weights on gradient descent step

            Arguments
            ---------
            delta_weights_i_h: change in weights from input to hidden layers
            delta_weights_h_o: change in weights from hidden to output layers
            n_records: number of records

        '''
        self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records  # update hidden-to-output weights with gradient descent step
        self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records   # update input-to-hidden weights with gradient descent step

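    # Note on the division by n_records: the deltas are summed over every
    # record in the batch, so dividing by n_records averages them and keeps
    # the effective step size independent of the batch size.
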
    def run(self, features):
        ''' Run a forward pass through the network with input features

            Arguments
            ---------
            features: 1D array of feature values

        '''
        #### Forward pass ####
        # Hidden layer
        hidden_inputs = np.dot(features, self.weights_input_to_hidden)   # signals into hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)         # signals from hidden layer

        # Output layer (identity activation)
        final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)  # signals into final output layer
        final_outputs = final_inputs                                          # signals from final output layer

        return final_outputs


#########################################################
# Set your hyperparameters here
#########################################################
iterations = 5000
learning_rate = 0.4
hidden_nodes = 8
output_nodes = 1
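
# A minimal, self-contained sketch of how the class and hyperparameters above
# might be used. The original paste stops at the hyperparameters, so the
# synthetic train_features / train_targets arrays, the input width of 3, and
# the mini-batch size of 128 below are illustrative assumptions only.
np.random.seed(21)
train_features = np.random.rand(1000, 3)                            # 1000 records, 3 features
train_targets = np.dot(train_features, np.array([0.2, 0.5, 0.3]))   # synthetic linear targets

network = NeuralNetwork(train_features.shape[1], hidden_nodes, output_nodes, learning_rate)
for ii in range(iterations):
    batch = np.random.choice(train_features.shape[0], size=128)     # random mini-batch indices
    network.train(train_features[batch], train_targets[batch])

print(network.run(train_features[:5]).flatten())  # predictions for a few records
print(train_targets[:5])                          # targets for comparison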