Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import numpy as np
- import matplotlib.pyplot as plt
# Bias input fed to every layer
bias = 1

# XOR truth table, one row per sample: [x, y, bias, label]
xorFunction = [
    [1, 1, bias, 0],
    [1, 0, bias, 1],
    [0, 1, bias, 1],
    [0, 0, bias, 0],
]

# Hand-picked weight configuration
h1p = 0.65   # weight for both inputs of hidden node 1
h2p = 1.10   # weight for both inputs of hidden node 2
b = -1.00    # shared bias weight
x_x = -5.00  # output-node weight on hidden node 1
x_y = 1.90   # output-node weight on hidden node 2

# Weight vectors, ordered as (w_x, w_y, w_bias)
h1Weights = [h1p, h1p, b]
h2Weights = [h2p, h2p, b]
xorWeights = [x_x, x_y, b]
# Dot product
def netValue(weights, inputs):
    """Return the dot product of *weights* and *inputs*.

    Only the first ``len(weights)`` entries of *inputs* are used, so a
    trailing label column in *inputs* is ignored — same as the original
    index-based loop over ``range(len(weights))``.
    """
    return sum(w * x for w, x in zip(weights, inputs))
# Activation function using tanh
def tanhActivation(x):
    """Squash *x* into the open interval (-1, 1) via hyperbolic tangent."""
    return np.tanh(x)
# Activation function using the step function
def stepActivation(x):
    """Heaviside step: 1 when x >= 0, otherwise 0."""
    return 1 if x >= 0 else 0
# Update function using stepActivation
# @inputs is a row of xorFunction: [x, y, bias, label]
def stepUpdate(weights, inputs, learningRate):
    """Apply one perceptron learning step to *weights* in place.

    BUG FIX: the original aliased ``dataPoint = inputs`` and then ran
    ``del dataPoint[3]``, destructively removing the label from the
    caller's list. A slice copy is taken instead, so *inputs* is left
    untouched.

    Returns the mutated *weights* list (same object, for convenience).
    """
    dataLabel = inputs[3]
    dataPoint = inputs[:3]  # [x, y, bias] — copy; label excluded
    # actVal = netValue --> activation
    actVal = stepActivation(netValue(weights, dataPoint))
    # perceptron rule: w_i += lr * x_i * (target - prediction)
    error = dataLabel - actVal
    for i in range(len(weights)):
        weights[i] += learningRate * dataPoint[i] * error
    return weights
##### MAIN FUNCTION #####
# Learning: train the output perceptron on the hidden layer's activations.
epochs = 10000
learningRate = 0.01
for epoch in range(epochs):
    for i in range(len(xorFunction)):  # each data point
        dataRow = xorFunction[i]
        # BUG FIX: the original read ``xorFunction[3]`` (always the last
        # ROW, not this row's label) and then did ``dataPoint = xorFunction;
        # del dataPoint[3]``, deleting rows from the training set itself
        # (IndexError on the next iteration). Use the current row instead.
        dataLabel = dataRow[3]
        dataPoint = dataRow[:3]  # [x, y, bias] — copy; data left intact
        # hlVal1 = netValue --> activation (of first node of HL)
        hlVal1 = tanhActivation(netValue(h1Weights, dataPoint))
        # hlVal2 = netValue --> activation (of second node)
        hlVal2 = tanhActivation(netValue(h2Weights, dataPoint))
        # construct the input "row" for the next layer
        xorInput = [hlVal1, hlVal2, bias, dataLabel]
        # perform an update (step)
        xorWeights = stepUpdate(xorWeights, xorInput, learningRate)
# Plotting: classify every cell of a 2-D grid with the trained network
# and colour it blue (True) or red (False).
incVal = .05  # resolution
minVal = -1
maxVal = 2.2
size = incVal * 250  # calculates reasonable size for points
print("Generating plot... ", end="", flush=True)
xValues = np.arange(minVal, maxVal, incVal)
yValues = np.arange(minVal, maxVal, incVal)
x, y = np.meshgrid(xValues, yValues)
testGrid = np.array((x.ravel(), y.ravel())).T
# BUG FIX: the original passed label='True'/'False' on EVERY plotted point,
# so plt.legend() accumulated one entry per grid cell. Collect the points
# per class and draw each class with a single plot call instead.
truePoints = []
falsePoints = []
for px, py in testGrid:
    # generate data "row" from the grid cell
    features = [px, py, bias]
    # actVal1/actVal2 = netValue --> activation (hidden layer)
    actVal1 = tanhActivation(netValue(h1Weights, features))
    actVal2 = tanhActivation(netValue(h2Weights, features))
    # construct next layer data "row"
    xorInput = [actVal1, actVal2, bias]
    # result = netValue --> activation (output layer)
    result = stepActivation(netValue(xorWeights, xorInput))
    (truePoints if result == 1 else falsePoints).append((px, py))
if truePoints:
    plt.plot([p[0] for p in truePoints], [p[1] for p in truePoints],
             'bs', ms=size, label='True')
if falsePoints:
    plt.plot([p[0] for p in falsePoints], [p[1] for p in falsePoints],
             'rs', ms=size, label='False')
plt.xlim((minVal, maxVal - incVal))
plt.ylim((minVal, maxVal - incVal))
plt.title('XOR function (p xor q)')
plt.xlabel('p value')
plt.ylabel('q value')
plt.legend()
plt.show()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement