Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #ANSQ3
def sse(output, target):
    """Return the sum-of-squares error 0.5 * (output - target)^2 for one sample.

    The 0.5 factor makes the gradient (output - target) come out clean.
    """
    # Fixed: removed the stray C-style trailing semicolon.
    return 0.5 * (output - target) ** 2
def gradSse(output, target):
    """Return d(sse)/d(output) = output - target for one sample."""
    # Fixed: removed the stray C-style trailing semicolon.
    return output - target
def neuron(weights, inputs):
    """Output of a single neuron: sigma applied to the weighted input sum.

    NOTE(review): relies on a `sigma` activation defined elsewhere in the file —
    presumably the logistic sigmoid, given gradNeuron's out*(1-out) form; confirm.
    """
    pre_activation = np.dot(weights, inputs)
    return sigma(pre_activation)
def gradNeuron(weights, inputs):
    """Gradient of the neuron output w.r.t. `weights`.

    Uses the sigmoid derivative identity sigma' = out * (1 - out), scaled by the
    inputs (chain rule through the dot product).
    """
    # Fixed: the original evaluated neuron(weights, inputs) twice; compute once.
    out = neuron(weights, inputs)
    return out * (1 - out) * inputs
- #/ANSQ3
#################################################################################
#ANSQ4
# Finite-difference check of gradNeuron: for 10 random (w, x) pairs, compare a
# central-difference estimate of d(neuron)/d(w_j) against the analytical gradient.
delta = 1e-5
for i in range(10):
    w = np.random.randn(5)
    x = np.random.randn(5)
    # Central difference per weight: (f(w + d/2) - f(w - d/2)) / d.
    # Fixed: dropped the unused scaffold variable `gw = 0.` and preallocate an
    # array instead of appending to a Python list.
    fd = np.empty(5)
    for j in range(5):
        w_plus = w.copy()
        w_plus[j] += delta / 2
        w_minus = w.copy()
        w_minus[j] -= delta / 2
        fd[j] = (neuron(w_plus, x) - neuron(w_minus, x)) / delta
    analytical = gradNeuron(w, x)
    print("FD: ", np.round(fd, 8))
    print("Analytical", analytical)
    print(" -- DIFF ", fd - analytical)
#/ANSQ4
##############
#ANSQ5
# Start by augmenting the data with a 1 so that w[0] is the bias
# (column 0 is the constant 1, column 1 holds the raw inputs).
X = np.column_stack((np.ones(train.shape[0]), train[:, 0]))
def errorFun(w):
    """Total sum-of-squares error of the neuron over the training targets."""
    return sum(
        sse(neuron(w, X[n]), traint[n])
        for n in range(len(traint))
    )
def gradFun(w):
    """Gradient of the total SSE w.r.t. `w`, summed over all training samples."""
    total = 0
    for n, target in enumerate(traint):
        prediction = neuron(w, X[n])
        # Chain rule: d(sse)/d(out) * d(out)/d(w).
        total += gradSse(prediction, target) * gradNeuron(w, X[n])
    return total
# Fit the 2-parameter (bias + slope) neuron by gradient descent from zero init.
w = np.zeros(2)
w_fit, err, times = gradDesc(w, errorFun, gradFun, verbose=False)
# Fixed: build the fitted curve with a comprehension instead of
# preallocate-and-fill, and dropped the stray trailing semicolon on plt.plot.
fitline = np.array([neuron(w_fit, X[n, :]) for n in range(len(traint))])
print(err)
plt.figure(1)
plt.plot(err)  # error trajectory over the descent iterations
plt.figure(2)
plt.plot(train, traint, 'bo', train, fitline)  # data vs. fitted curve
#/ANSQ5
########################
Advertisement
Add Comment
Please sign in to add a comment
Advertisement