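# Two-layer XOR network: two tanh hidden units feed one step-activation
# output unit trained with the perceptron rule; the trained network's
# decision regions are then plotted over a grid.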
import numpy as np
import matplotlib.pyplot as plt

bias = 1

# XOR function rows: (x, y, bias, label)
xorFunction = [[1, 1, bias, 0],
               [1, 0, bias, 1],
               [0, 1, bias, 1],
               [0, 0, bias, 0]]

# Weight configuration
h1p = 0.65
h2p = 1.10
b = -1.00
x_x = -5.00
x_y = 1.90

# Weights (w_x, w_y, b)
h1Weights = [h1p, h1p, b]
h2Weights = [h2p, h2p, b]
xorWeights = [x_x, x_y, b]
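
# Note (added): h1Weights and h2Weights are fixed by hand and never updated;
# only xorWeights is trained below, so the two tanh units act as a frozen
# feature map that makes XOR linearly separable for the output unit.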

# Dot product of a weight vector and an input vector
def netValue(weights, inputs):
    netVal = 0
    for i in range(len(weights)):
        netVal = netVal + (weights[i] * inputs[i])
    return netVal
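
# Quick sanity check (illustrative, not part of the original logic):
# netValue is a plain dot product, so [0.65, 0.65, -1.0] . [1, 0, 1]
# should give 0.65 - 1.0 = -0.35.
assert abs(netValue([0.65, 0.65, -1.0], [1, 0, 1]) - (-0.35)) < 1e-9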

# Activation function using tanh
def tanhActivation(x):
    return np.tanh(x)

# Activation function using the step function
def stepActivation(x):
    if x >= 0:
        return 1
    else:
        return 0
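
# Illustrative check (added): the step function thresholds at zero, giving a
# hard binary output, while tanh keeps the hidden layer smooth.
assert stepActivation(0.3) == 1 and stepActivation(-0.3) == 0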

# Update function using stepActivation (the perceptron learning rule)
# @inputs is a row of xorFunction: (x, y, bias, label)
def stepUpdate(weights, inputs, learningRate):
    dataLabel = inputs[3]
    dataPoint = inputs[0:3]  # features plus bias, without the label
    # actVal = netValue --> activation
    actVal = stepActivation(netValue(weights, dataPoint))
    for i in range(len(weights)):
        # w_i <- w_i + learningRate * x_i * (label - prediction)
        weights[i] = weights[i] + learningRate * dataPoint[i] * (dataLabel - actVal)
    return weights
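
# Worked example (added): with zero x/y weights and a bias weight of -1.0,
# the net value on input [1, 1, 1] is -1.0, the step output is 0, and a
# label of 1 pushes every weight up by learningRate * input * (1 - 0).
assert stepUpdate([0.0, 0.0, -1.0], [1, 1, 1, 1], 0.5) == [0.5, 0.5, -0.5]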

##### MAIN FUNCTION #####

# Learning
epochs = 10000
learningRate = 0.01
for epoch in range(epochs):
    for i in range(len(xorFunction)):  # each data point
        dataRow = xorFunction[i]
        dataLabel = dataRow[3]
        dataPoint = dataRow[0:3]
        # hlVal1 = netValue --> activation (first hidden-layer node)
        hlVal1 = tanhActivation(netValue(h1Weights, dataPoint))
        # hlVal2 = netValue --> activation (second hidden-layer node)
        hlVal2 = tanhActivation(netValue(h2Weights, dataPoint))
        # construct the input "row" for the next layer
        xorInput = [hlVal1, hlVal2, bias, dataLabel]
        # perform an update (step)
        xorWeights = stepUpdate(xorWeights, xorInput, learningRate)

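# Post-training check (added): if the perceptron updates converged, the
# network reproduces the XOR truth table on its four corner inputs.
for row in xorFunction:
    h1 = tanhActivation(netValue(h1Weights, row[0:3]))
    h2 = tanhActivation(netValue(h2Weights, row[0:3]))
    assert stepActivation(netValue(xorWeights, [h1, h2, bias])) == row[3]
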
# Plotting
incVal = .05  # resolution of the sampling grid
minVal = -1
maxVal = 2.2
size = incVal * 250  # calculates a reasonable size for points

print("Generating plot... ", end="", flush=True)
xValues = np.arange(minVal, maxVal, incVal)
yValues = np.arange(minVal, maxVal, incVal)
x, y = np.meshgrid(xValues, yValues)
testGrid = np.array((x.ravel(), y.ravel())).T

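# testGrid is an (N, 2) array of (x, y) sample points covering the plane in
# row-major order, e.g. [[-1.0, -1.0], [-0.95, -1.0], ...]; each point is
# classified by the trained network below.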
for i in range(len(testGrid)):
    # generate data "row" from testGrid
    features = [testGrid[i][0], testGrid[i][1], bias]
    # actVal1/actVal2 = netValue --> activation
    actVal1 = tanhActivation(netValue(h1Weights, features))
    actVal2 = tanhActivation(netValue(h2Weights, features))
    # construct next layer data "row"
    xorInput = [actVal1, actVal2, bias]
    # result = netValue --> activation
    result = stepActivation(netValue(xorWeights, xorInput))
    if result == 1:
        plt.plot(features[0], features[1], 'bs', ms=size, label='True')
    else:
        plt.plot(features[0], features[1], 'rs', ms=size, label='False')

plt.xlim((minVal, maxVal - incVal))
plt.ylim((minVal, maxVal - incVal))
plt.title('XOR function (p xor q)')
plt.xlabel('p value')
plt.ylabel('q value')
# Every plotted point carries a label, so deduplicate the legend down to one
# handle per unique label before drawing it.
handles, labels = plt.gca().get_legend_handles_labels()
byLabel = dict(zip(labels, handles))
plt.legend(byLabel.values(), byLabel.keys())
print("done")
plt.show()