import numpy as np

def loss(X, t, w, lamb, n):
    # Regularized sum-of-squares error; the lamb / 2 * ||w||^2 term matches
    # the lamb * w term in the gradient below.
    return 1 / 2 * np.sum((t - w @ return_phi(X, n).T) ** 2) + lamb / 2 * np.sum(w ** 2)

def gradient(X, t, w, lamb, n):
    # Gradient of the regularized loss with respect to w.
    phi = return_phi(X, n)
    return -np.dot(t - np.dot(w, phi.T), phi) + lamb * w

def gradient_descent(X, t, n, step, lamb, eps=1e-6):
    # eps is the relative stopping tolerance (the original referenced
    # undefined eps and eps0 constants).
    loss_vals = []
    w_next = np.random.rand(n + 1).reshape((1, n + 1)) / 100  # small random init
    cant_stop = True
    while cant_stop:
        w_old = w_next
        w_next = w_old - step * gradient(X, t, w_old, lamb, n)
        # Record the loss value at the new iterate (the original appended the
        # function object itself).
        loss_vals.append(loss(X, t, w_next, lamb, n))
        # Stop once the relative change in w falls below eps.
        if np.linalg.norm(w_next - w_old) <= eps * np.linalg.norm(w_next):
            cant_stop = False
    return loss_vals, w_next
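The paste relies on a return_phi helper that is not included. A minimal sketch, assuming it builds a polynomial design matrix with columns X**0 through X**n (a hypothetical reconstruction, not the original definition), plus an illustrative call with synthetic data:

# Hypothetical stand-in for the missing helper: polynomial features of
# degree 0..n, one row per data point.
def return_phi(X, n):
    return np.vander(np.asarray(X).ravel(), N=n + 1, increasing=True)

# Example usage (synthetic data; step, lamb, and n are illustrative only):
X = np.linspace(0, 1, 50)
t = np.sin(2 * np.pi * X) + 0.1 * np.random.randn(50)
loss_vals, w = gradient_descent(X, t, n=3, step=1e-3, lamb=1e-2)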