Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- ##
- # LOGISTIC REGRESSION
- ##
def sigmoid(x):
    """Apply a NUMERICALLY STABLE sigmoid function elementwise to x.

    Uses exp(-|x|), which is always in (0, 1], so np.exp can never
    overflow: for x >= 0 this gives 1 / (1 + exp(-x)), and for x < 0
    the algebraically equivalent exp(x) / (1 + exp(x)).

    Parameters
    ----------
    x : scalar or np.ndarray
        Input value(s).

    Returns
    -------
    np.ndarray (same shape as x)
        sigma(x) in (0, 1).
    """
    z = np.exp(-np.abs(x))
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
def calculate_logistic_gradient(y, tx, w):
    """Compute the gradient of the logistic loss at w.

    Gradient of the negative log-likelihood: tx^T (sigma(tx w) - y).
    """
    prediction = sigmoid(tx.dot(w))
    residual = prediction - y
    return tx.T.dot(residual)
def logistic_regression(y, tx, gamma, max_iter):
    """Logistic regression using full-batch gradient descent.

    Parameters
    ----------
    y : np.ndarray, shape (N, 1)
        Labels.
    tx : np.ndarray, shape (N, D)
        Feature matrix.
    gamma : float
        Step size.
    max_iter : int
        Number of gradient-descent steps.

    Returns
    -------
    (loss, w) : the final loss and the learned weights, shape (D, 1).
    """
    w = np.zeros((tx.shape[1], 1))
    for _ in range(max_iter):
        gradient = calculate_logistic_gradient(y, tx, w)
        w = w - gamma * gradient
    # Only the final loss is returned, so compute it once after the loop
    # instead of on every iteration.
    loss = compute_loss(y, tx, w)
    return loss, w
def calculate_hessian(y, tx, w):
    """Return the Hessian tx^T S tx of the logistic loss at w.

    S is the diagonal matrix with entries sigma(x_n^T w) * (1 - sigma(x_n^T w)).
    y is unused but kept so the signature matches the other loss helpers.

    Returns
    -------
    np.ndarray, shape (D, D)
    """
    # sigmoid already operates elementwise on arrays; no np.vectorize needed.
    sig = sigmoid(np.dot(tx, w))
    s_nn = sig * (1 - sig)  # diagonal of S, shape (N, 1)
    # Removed a leftover Tracer()() debugger breakpoint that crashed at runtime.
    # Broadcast the diagonal onto the rows of tx instead of materializing the
    # dense (N, N) matrix np.diag would build — same result, O(N*D) memory.
    return np.dot(tx.T, tx * s_nn)
def penalized_logistic_regression(y, tx, w, lambd):
    """Return the gradient and Hessian of the L2-penalized logistic loss.

    The penalty lambd * ||w||^2 contributes 2 * lambd * w to the gradient.
    NOTE(review): the Hessian is returned WITHOUT the matching 2 * lambd * I
    term — confirm whether that is intentional.
    """
    penalized_grad = calculate_logistic_gradient(y, tx, w) + lambd * 2 * w
    hess = calculate_hessian(y, tx, w)
    return penalized_grad, hess
def reg_logistic_regression(y, tx, lambd, gamma, max_iters):
    """Penalized logistic regression using damped Newton steps.

    Parameters
    ----------
    y : np.ndarray, shape (N, 1)
        Labels.
    tx : np.ndarray, shape (N, D)
        Feature matrix.
    lambd : float
        L2 regularization strength.
    gamma : float
        Step size (damping) applied to each Newton step.
    max_iters : int
        Number of Newton iterations.

    Returns
    -------
    (loss, w) : penalized loss (including lambd * ||w||^2) and the learned
        weights, shape (D, 1).
    """
    w = np.zeros((tx.shape[1], 1))
    for _ in range(max_iters):
        gradient, hessian = penalized_logistic_regression(y, tx, w, lambd)
        # Solve H d = g directly rather than forming H^{-1}: cheaper and
        # numerically better conditioned than np.linalg.inv.
        w = w - gamma * np.linalg.solve(hessian, gradient)
    # Only the final loss is returned, so compute it once after the loop.
    loss_penalty = lambd * np.sum(np.power(w, 2))
    loss = calculate_loss_by_likelyhood(y, tx, w) + loss_penalty
    return loss, w
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement