import numpy as np
from sklearn.base import BaseEstimator


class LinearReg(BaseEstimator):
    def __init__(self, gd_type='stochastic',
                 tolerance=1e-4, max_iter=1000, w0=None, alpha=1e-3, eta=1e-2):
        """
        gd_type: 'full' or 'stochastic' or 'momentum'
        tolerance: for stopping gradient descent
        max_iter: maximum number of steps in gradient descent
        w0: np.array of shape (d) - init weights
        eta: learning rate
        alpha: momentum coefficient
        """
        self.gd_type = gd_type
        self.tolerance = tolerance
        self.max_iter = max_iter
        self.w0 = w0
        self.alpha = alpha
        self.w = None
        self.eta = eta
        self.loss_history = None  # list of loss function values at each training iteration

    def fit(self, X, y):
        """
        X: np.array of shape (ell, d)
        y: np.array of shape (ell)
        ---
        output: self
        """
        self.loss_history = []
        # Start from the provided initial weights (copied so w0 is not mutated in place),
        # or from zeros if no initial point was given.
        self.w = self.w0.copy() if self.w0 is not None else np.zeros(X.shape[1])

        self.w_list = [self.w.copy()]

        for i in range(self.max_iter):
            # Full-batch gradient step; only this variant is implemented here,
            # regardless of the gd_type setting.
            step = self.eta * self.calc_gradient(X, y)
            self.w = self.w - step
            self.w_list.append(self.w.copy())
            self.loss_history.append(self.calc_loss(X, y))

            # Stop once the update becomes smaller than the tolerance.
            if np.linalg.norm(step) < self.tolerance:
                break

        # Convert the weight trajectory to an array only after the loop is done,
        # so that append keeps working inside the loop.
        self.w_list = np.array(self.w_list)

        return self

    def predict(self, X):
        if self.w is None:
            raise Exception('Not trained yet')
        return np.dot(X, self.w)

    def calc_gradient(self, X, y):
        # Gradient of the MSE loss: 2/ell * X^T (Xw - y)
        return 2 * np.dot(X.T, np.dot(X, self.w) - y) / y.shape[0]

    def calc_loss(self, X, y):
        # Mean squared error on (X, y) with the current weights.
        return np.mean(np.power(np.dot(X, self.w) - y, 2))