Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import numpy as np
from sklearn.base import BaseEstimator


class LinearReg(BaseEstimator):
    """Linear regression trained by gradient descent on the MSE loss.

    Supports full-batch ('full'), single-sample stochastic ('stochastic')
    and momentum ('momentum') gradient descent, selected via ``gd_type``.
    """

    def __init__(self, gd_type='stochastic',
                 tolerance=1e-4, max_iter=1000, w0=None, alpha=1e-3, eta=1e-2):
        """
        gd_type: 'full' or 'stochastic' or 'momentum'
        tolerance: stop gradient descent once the step norm falls below this
        max_iter: maximum number of steps in gradient descent
        w0: np.array of shape (d) - init weights (zeros of shape (d) if None)
        eta: learning rate
        alpha: momentum coefficient
        """
        self.gd_type = gd_type
        self.tolerance = tolerance
        self.max_iter = max_iter
        self.w0 = w0
        self.alpha = alpha
        self.w = None
        self.eta = eta
        self.loss_history = None  # list of loss function values at each training iteration

    def fit(self, X, y):
        """
        X: np.array of shape (ell, d)
        y: np.array of shape (ell)
        ---
        output: self
        """
        self.loss_history = []
        # Copy w0 so the caller's array is never mutated in place;
        # default to zeros when no initial weights were given.
        if self.w0 is None:
            self.w = np.zeros(X.shape[1])
        else:
            self.w = np.array(self.w0, dtype=float)
        self.w_list = [self.w.copy()]
        velocity = np.zeros_like(self.w)  # momentum accumulator
        for _ in range(self.max_iter):
            if self.gd_type == 'stochastic':
                # Estimate the gradient from one random sample (kept 2-D
                # via slicing so calc_gradient's shapes still work).
                i = np.random.randint(X.shape[0])
                grad = self.calc_gradient(X[i:i + 1], y[i:i + 1])
            else:
                # 'full' and 'momentum' use the exact full-batch gradient.
                grad = self.calc_gradient(X, y)
            if self.gd_type == 'momentum':
                velocity = self.alpha * velocity + self.eta * grad
                step = velocity
            else:
                step = self.eta * grad
            # Out-of-place update: each w_list entry must stay a distinct
            # snapshot (the original in-place `-=` aliased them all).
            self.w = self.w - step
            self.w_list.append(self.w.copy())
            self.loss_history.append(self.calc_loss(X, y))
            # Stopping criterion: the update became negligibly small.
            if np.linalg.norm(step) < self.tolerance:
                break
        self.w_list = np.array(self.w_list)
        return self

    def predict(self, X):
        """Return predictions X @ w; raises if fit() has not been called."""
        if self.w is None:
            raise Exception('Not trained yet')
        return np.dot(X, self.w)

    def calc_gradient(self, X, y):
        """Gradient of the MSE loss w.r.t. w: 2 * X^T (Xw - y) / ell."""
        return 2 * np.dot(X.T, np.dot(X, self.w) - y) / y.shape[0]

    def calc_loss(self, X, y):
        """Mean squared error of the current weights on (X, y)."""
        return np.mean(np.power(np.dot(X, self.w) - y, 2))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement