Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- from scipy.spatial import distance
- import numpy as np
- from sklearn.base import BaseEstimator
class LinearReg(BaseEstimator):
    """Linear regression fitted by gradient descent.

    Descent variants (``gd_type``):
      * ``'full'``      -- batch gradient descent over all samples.
      * ``'stohastic'`` -- SGD on a random mini-batch (misspelling kept
                           for backward compatibility with callers).
      * anything else   -- momentum-style update scaled by ``alpha``.

    Loss functions (``loss_type``): ``'MSE'`` (mean squared error) or
    ``'R'`` (an R^2-based objective).
    """

    def __init__(self, gd_type, loss_type="MSE",
                 tolerance=1, max_iter=15, w0=None, alpha=0.001, eta=0.01):
        # eta is the learning rate (step size); alpha the momentum factor.
        self.loss_type = loss_type
        self.gd_type = gd_type
        self.tolerance = tolerance   # stop when ||w_new - w_old|| <= tolerance
        self.max_iter = max_iter
        self.w0 = w0                 # optional initial weights; during fit it
                                     # holds the previous iterate (original behavior)
        self.alpha = alpha
        self.w = None                # learned weights, set by fit()
        self.eta = eta
        self.iterations = 0          # iterations actually performed by fit()
        self.loss_history = None     # loss value at each training iteration

    def fit(self, _X, _y):
        """Fit the weights on (_X, _y) by gradient descent; return self.

        Bug fixes vs. the original:
          * the iteration counter was incremented into a Cyrillic 'с'
            variable, so ``self.iterations`` always stayed 0;
          * the gradient was computed twice per step (once for a debug
            print);
          * the update stepped from the stale previous iterate (w0)
            instead of the current weights w;
          * a user-supplied ``w0`` was always clobbered by random init.
        """
        if self.gd_type == 'stohastic':
            # Mini-batch sampled with replacement, capped so the batch
            # never exceeds the number of available rows.
            batch_size = min(300, _X.shape[0])
            indexes = np.random.randint(_X.shape[0], size=batch_size)
            X = _X[indexes, :]
            y = _y[indexes]
        else:
            X = _X
            y = _y

        self.loss_history = []
        # Start from the user-supplied w0 when given, else random init
        # (same distribution as the original implementation).
        if self.w0 is None:
            self.w = np.random.uniform(0.8, 0.9, X.shape[1])
        else:
            self.w = np.asarray(self.w0, dtype=float).copy()
        self.w0 = self.w.copy()  # previous iterate, for the stopping test

        for i in range(self.max_iter):
            grad = self.calc_gradient(X, y)  # computed once per iteration
            if self.gd_type in ('full', 'stohastic'):
                new_w = self.w - self.eta * grad
            else:
                # Momentum-style step built from the previous iterate.
                step = self.alpha * self.w0 + self.eta * grad
                new_w = self.w - step
            self.w0 = self.w.copy()
            self.w = new_w
            self.iterations = i + 1
            self.loss_history.append(self.calc_loss(X, y))
            # Converged once consecutive iterates are close enough.
            if distance.euclidean(self.w0, self.w) <= self.tolerance:
                break
        return self

    def predict(self, X):
        """Return the linear prediction X @ w.

        Raises if fit() has not been called yet (message preserved).
        """
        if self.w is None:
            raise Exception('Not trained yet')
        return np.dot(X, self.w)

    def calc_gradient(self, X, y):
        """Gradient of the configured loss w.r.t. the weights.

        Raises ValueError for an unknown ``loss_type`` instead of the
        original silent ``None`` return (which crashed later with an
        opaque TypeError inside fit()).
        """
        residual = self.predict(X) - y
        if self.loss_type == 'R':
            # Gradient of the R^2-based objective: residual term scaled
            # by the total sum of squares of y.
            return 2 * np.dot(X.T, residual) / np.sum((y - np.mean(y)) ** 2, axis=0)
        if self.loss_type == 'MSE':
            return (2 / X.shape[0]) * np.dot(X.T, residual)
        raise ValueError(f"Unknown loss_type: {self.loss_type!r}")

    def calc_loss(self, X, y):
        """Current loss: MSE, or the R^2 score for any other loss_type."""
        if self.loss_type == 'MSE':
            return np.mean((np.dot(X, self.w) - y) ** 2, axis=0)
        residual = self.predict(X) - y
        return 1 - np.sum(residual ** 2, axis=0) / np.sum((y - np.mean(y)) ** 2, axis=0)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement