Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def tanh(x, derivative=False):
    """Hyperbolic tangent activation.

    With derivative=False (default) returns tanh(x); with derivative=True
    returns the derivative of tanh evaluated at x, i.e. 1 - tanh(x)**2.
    """
    t = np.tanh(x)
    if derivative:
        return 1 - t**2
    return t
# Bias input fed to every unit (the constant "1" slot of each input vector).
bias = 1
# Step size applied to the gradients inside backpropagation().
learningrate = 0.5
##3 2 1 4
##c b a bias
# 4x4 weight matrix.  Rows 0-2 are the three hidden units, each taking the
# input vector [c, b, a, 1]; row 3 is the output unit, taking the three
# hidden activations plus the bias slot.  Values are a previously trained
# snapshot, hard-coded here as the starting point.
weights = np.array([
    [0.9999999732819997, 0.9999999465639994, 0.9999999732819997, 0.9998439262081439],
    [-2.154660263345219, -2.001883323187517, -0.5181052805955335, 0.9889430644706741],
    [0.9999999999999998, 0.9999999465639994, 0.9999999465639998, 0.9998439262081439],
    [-0.001879897900672004, -0.008565417076598794, -0.001879897900672004, -0.003450528541677388]])
def add(x, y):
    """Append "\\n<br>" + x to the text file at path y.

    The original read the entire file and rewrote all of it just to append
    one line; opening in append mode produces the same final content in one
    step, avoids losing data if the rewrite is interrupted, and also works
    when the file does not exist yet.  The context manager guarantees the
    handle is closed.
    """
    with open(y, 'a') as f:
        f.write("\n<br>" + x)
def hidden(a, b, c, d):
    """Hidden-layer activation(s) for the input triple (a, b, c).

    d selects the result: 0, 1 or 2 return that single unit's tanh
    activation; 3 returns all three activations plus the bias slot as an
    array.  Any other d falls off the end and returns None, exactly as the
    original did.
    """
    global weights
    global bias
    # Element-wise product of every weight row with the input vector;
    # summing a row then gives that unit's pre-activation.
    weighted = weights * [c, b, a, 1]
    if d in (0, 1, 2):
        return tanh(sum(weighted[d]))
    if d == 3:
        return np.array([tanh(sum(weighted[0])),
                         tanh(sum(weighted[1])),
                         tanh(sum(weighted[2])),
                         1])
def u4(a, b, c):
    """Raw tanh output of the final unit for the input triple (a, b, c)."""
    global weights
    global bias
    # Hidden activations (units 2, 1, 0 in that order) plus the bias slot.
    layer = [hidden(a, b, c, 2), hidden(a, b, c, 1), hidden(a, b, c, 0), 1]
    return tanh(sum(weights[3] * layer))
def feedforward(a, b, c):
    """Network output for (a, b, c): the final tanh shifted by +1 into [0, 2]."""
    return 1 + u4(a, b, c)
def error(a, b, c, d):
    """Squared error of the network on (a, b, c) against target d, scaled by 1/100."""
    diff = d - feedforward(a, b, c)
    return (diff ** 2) / 100
def backpropagation(a,b,c,d):
    """One gradient-descent step on the global `weights` for the sample
    (a, b, c) with target d.  Mutates `weights` in place."""
    global learningrate
    global bias
    global weights
    result = feedforward(a,b,c)
    # Output-unit delta.  NOTE(review): for the tanh(x)+1 output the usual
    # delta would be (d-result)*(1-(result-1)**2); the factor
    # result*((bias-result)**2) used here does not match that derivative —
    # confirm whether this deviation is intentional.
    delta0 = (d-result)*result*((bias-result)**2)
    # Hidden-unit deltas: output delta routed through that unit's outgoing
    # weight, times h*(1-h**2) where h is the unit's activation.
    # NOTE(review): the plain tanh derivative is 1-h**2; the extra factor h
    # looks suspicious — verify.
    delta3 = (delta0*weights[3][0])*hidden(a,b,c,2)*(1-hidden(a,b,c,2)**2)
    delta2 = (delta0*weights[3][1])*hidden(a,b,c,1)*(1-hidden(a,b,c,1)**2)
    delta1 = (delta0*weights[3][2])*hidden(a,b,c,0)*(1-hidden(a,b,c,0)**2)
    #adjustments
    # All deltas above are computed from the pre-update weights; the updates
    # below must stay after them (and the output-row update uses hidden
    # activations that still reflect the old hidden weights).
    outputAdjustment = hidden(a,b,c,3)*(learningrate*delta0)
    weights[3]+=outputAdjustment
    # Hidden rows: gradient w.r.t. each is the input vector [c, b, a, 1]
    # scaled by that unit's delta.
    u3Adjustment = np.array([c,b,a,1])*(learningrate*delta3)
    weights[0]+= u3Adjustment
    u2Adjustment = np.array([c,b,a,1])*(learningrate*delta2)
    weights[1]+= u2Adjustment
    u1Adjustment = np.array([c,b,a,1])*(learningrate*delta1)
    weights[2]+= u1Adjustment
def train(data):
    """Train on `data` (rows of [a, b, c, target]) until the network rounds
    to the right class on all six hard-coded test samples, then dump the
    weight matrix to weights-temp.txt.

    NOTE(review): indentation was reconstructed from a flattened paste —
    confirm the loop nesting against the original.  `passed` is never set
    to True, so the only exit is the c == 6 break; if the test set is never
    fully solved this loops forever.
    """
    testing = [[2,3,4,0], [1,3,5,0], [1,5,3,2], [2,12,4,2], [4,8,4,0], [6,12,6,0]]
    passed = False
    count = 0  # NOTE(review): never used below — confirm it can be dropped.
    while (not passed):
        # 500 epochs of backprop over the full training set.
        for _ in range(500):
            avgerr = 0
            for i in data:
                backpropagation(i[0], i[1], i[2], i[3])
                avgerr += error(i[0], i[1], i[2], i[3])
            avgerr = avgerr/len(data)
            # Every 100 epochs, log the average error to stdout and to the
            # HTML log via add().
            if (_%100 == 0):
                print avgerr
                add(str(avgerr), "index.html")
        # Count how many of the six test samples the net now classifies
        # correctly (output rounded to the nearest integer).
        c = 0
        for i in testing:
            if int(round(feedforward(i[0], i[1], i[2]))) == i[3]:
                c+=1
        if c == 6:
            break
    f = open('weights-temp.txt', 'w')
    f.write(str(weights))
    f.close()
# --- Script entry point (Python 2: `print` statements, raw_input) ---
# NOTE(review): `dataset` is not defined anywhere in this chunk —
# presumably built earlier in the file; confirm.
train(dataset)
a = ""
# Simple command loop: "test" runs one forward pass on user-supplied
# inputs, "train" retrains on `dataset`, "weights" prints the matrix,
# "quit" exits.
while a != "quit":
    a = raw_input(">> ")
    if a == "test":
        alpha = int(raw_input("alpha: "))
        beta = int(raw_input("beta: "))
        gamma = int(raw_input("gamma: "))
        print feedforward(alpha, beta, gamma)
    if a == "train":
        train(dataset)
    if a == "weights":
        print weights
# Persist the final weights on exit.  NOTE(review): placement relative to
# the loop reconstructed from a flattened paste — confirm this save runs
# once on quit rather than inside the "weights" branch.
f = open('weights.txt', 'w')
f.write(str(weights))
f.close()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement