import numpy as np


# sigmoid activation function
def nonlin(x):
    return 1 / (1 + np.exp(-x))


# sigmoid derivative
def nonlin_deriv(x):
    return x * (1 - x)
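
# Why x * (1 - x) works here (comment added for clarity): the derivative
# is evaluated on values that are already sigmoid outputs (l1 and l2 in
# the loop below), and for s = sigmoid(z) the chain rule gives
# ds/dz = s * (1 - s).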


# input
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])


# output: XOR of the first two input columns
Y = np.array([[0],
              [1],
              [1],
              [0]])


np.random.seed(0)


# synapse (weight) initialization, centered on zero in [-1, 1)
syn0 = 2 * np.random.random((3, 4)) - 1  # input -> hidden layer weights matrix
syn1 = 2 * np.random.random((4, 1)) - 1  # hidden layer -> output weights matrix


# training
for j in range(80000):

    # forward pass
    l0 = X                         # input layer
    l1 = nonlin(np.dot(l0, syn0))  # hidden layer
    l2 = nonlin(np.dot(l1, syn1))  # output layer
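
    # Shape check (comment added for clarity): l0 is (4, 3) and syn0 is
    # (3, 4), so l1 is (4, 4); syn1 is (4, 1), so l2 is (4, 1).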

    # back propagation of errors using the chain rule
    l2_error = Y - l2

    if (j % 10000) == 0:
        print(f'Error: {np.mean(np.abs(l2_error))}')

    l2_delta = l2_error * nonlin_deriv(l2)

    l1_error = l2_delta.dot(syn1.T)

    l1_delta = l1_error * nonlin_deriv(l1)

    # update weights
    syn1 += l1.T.dot(l2_delta)
    syn0 += l0.T.dot(l1_delta)
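
# Note (added, not in the original paste): the updates above use an
# implicit learning rate of 1; multiplying each delta by a small constant
# would be the usual way to control the step size.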


print('Output after training:')
print(l2)
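
# A minimal usage sketch (added, not part of the original paste): push a
# new input row through the trained weights. The third input column acts
# as a bias and is kept at 1, matching the training rows above.
new_x = np.array([[1, 0, 1]])  # hypothetical test row
pred = nonlin(np.dot(nonlin(np.dot(new_x, syn0)), syn1))
print(f'Prediction for {new_x[0].tolist()}: {pred[0, 0]:.4f}')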