import numpy as np

# Convergence tolerances for the stopping rule below. These are assumed
# module-level constants: the paste references eps and eps0 but never
# defines them, so the values here are illustrative.
eps = 1e-6    # relative tolerance on the weight update
eps0 = 1e-9   # absolute tolerance on the weight update

def loss(X, t, w, lamb, n):
    # Ridge-regularized sum-of-squares error:
    # E(w) = ||t - w Phi^T||^2 / 2 + lamb * ||w||^2 / 2
    e_d = np.power(t - np.dot(w, return_phi(X, n).T), 2).sum() / 2
    e_r = lamb * np.power(w, 2).sum() / 2
    return e_d + e_r

def gradient(X, t, w, lamb, n):
    # Gradient of the loss with respect to w:
    # dE/dw = -(t - w Phi^T) Phi + lamb * w
    return -(t - np.dot(w, return_phi(X, n).T)).dot(return_phi(X, n)) + lamb * w

def gradient_descent(X, t, n, step, lamb):
    loss_vals = []
    # Small random initial weights, shape (1, n + 1) to match the feature map.
    w_next = np.random.rand(n + 1).reshape((1, n + 1)) / 100
    cant_stop = True
    count = 0
    print(X.shape, t.shape, n, step, lamb, w_next.shape)
    while cant_stop:
        w_old = w_next
        w_next = w_old - step * gradient(X, t, w_old, lamb, n)
        loss_vals.append(loss(X, t, w_next, lamb, n))
        # Stop once the update is small relative to the weight norm.
        if np.linalg.norm(w_old - w_next) < eps * np.linalg.norm(w_next) + eps0:
            cant_stop = False
        count += 1
    print(loss_vals[-1], count)  # final loss and number of iterations
    return loss_vals, w_next
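
The paste calls return_phi(X, n) but never defines it. Below is a minimal sketch of a compatible implementation, assuming it maps a 1-D input array to an (N, n + 1) polynomial design matrix with columns x^0 through x^n, followed by a hypothetical usage example on synthetic data. Both the feature-map body and the example data are assumptions for illustration, not taken from the paste.

def return_phi(X, n):
    # Assumed polynomial feature map: column j holds X**j, shape (N, n + 1).
    return np.vander(np.asarray(X).ravel(), N=n + 1, increasing=True)

# Hypothetical usage on synthetic data (not part of the original paste):
rng = np.random.default_rng(0)
X = np.linspace(0, 1, 50)
t = np.sin(2 * np.pi * X) + 0.1 * rng.standard_normal(50)  # noisy targets
loss_vals, w = gradient_descent(X, t, n=3, step=0.01, lamb=1e-3)

With features in [0, 1] a step of 0.01 keeps the update stable here; for other data scales the step size and the eps/eps0 tolerances would need retuning.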