Untitled

a guest
Apr 9th, 2020
Python
import numpy as np


def loss(X, t, w, lamb, n):
    # Ridge-regression loss: 1/2 * ||t - w Phi^T||^2 + lamb/2 * ||w||^2.
    # The penalty term is included here so the loss matches the lamb * w term in gradient().
    phi = return_phi(X, n)
    return 0.5 * np.sum((t - w @ phi.T) ** 2) + 0.5 * lamb * np.sum(w ** 2)


def gradient(X, t, w, lamb, n):
    # Gradient of the loss above: -(t - w Phi^T) Phi + lamb * w.
    phi = return_phi(X, n)
    return -np.dot(t - np.dot(w, phi.T), phi) + lamb * w


def gradient_descent(X, t, n, step, lamb, eps=1e-6):
    # Plain gradient descent; eps replaces the undefined eps / eps0 globals
    # and acts as a relative tolerance on the change in the weights.
    loss_vals = []
    w_next = np.random.rand(n + 1).reshape((1, n + 1)) / 100  # small random initialization
    cant_stop = True
    while cant_stop:
        w_old = w_next
        w_next = w_old - step * gradient(X, t, w_old, lamb, n)
        loss_vals.append(loss(X, t, w_next, lamb, n))  # record the loss value, not the function object
        if np.linalg.norm(w_next - w_old) <= eps * np.linalg.norm(w_next):
            cant_stop = False
    return loss_vals, w_next
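return_phi is not defined in the paste. Below is a minimal sketch that assumes it builds a polynomial design matrix with columns X**0, ..., X**n, followed by an illustrative run on synthetic data; the helper's definition, the data, and the n / step / lamb values are assumptions for the example, not part of the original.

# Hypothetical helper: the paste does not define return_phi. Assumed here to map a
# 1-D input array X to the polynomial design matrix with columns X**0, ..., X**n.
def return_phi(X, n):
    return np.vander(np.asarray(X).ravel(), N=n + 1, increasing=True)


# Illustrative usage on synthetic data (values chosen for the example only).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = np.linspace(0, 1, 50)
    t = np.sin(2 * np.pi * X) + 0.1 * rng.standard_normal(50)
    loss_vals, w = gradient_descent(X, t, n=3, step=0.01, lamb=0.1)
    print("final loss:", loss_vals[-1])
    print("weights:", w)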