  1. """A basic implementation of linear regression
  2.  
  3. Uses gradient descent for finding the minimum, and least squares
  4. as the loss function to be optimized.
  5. """
  6. import numpy as np
  7.  
  8. class LinearRegression():
  9.     """Basic implementation of linear regression"""
  10.  
    def fit(self, x, y, lr=0.01, epochs=100):
        """Fit a linear regression model to numpy ndarrays

        Rows should be data points, and columns should be features
        """
        # Append a bias column so the intercept is learned as a weight
        x = _append_bias(x)
        # Extract number of features (including the bias column)
        n_features = x.shape[1]
        w = _initialise_weights(n_features)
        for _ in range(epochs):
            w = _train_epoch(x, y, w, lr)
        # Report the error of the trained weights
        print("MSE: {}".format(_mse(y, _activation(x, w))))
        print("Weights: {}".format(w))
        self.w = w

    def predict(self, x):
        """Predict based on the weights computed previously"""
        x = _append_bias(x)
        return _activation(x, self.w)


def _mse(a, b):
    """Compute MSE for 2 vectors"""
    return np.mean(np.square(a - b))


def _activation(x, w):
    """Activation function (dot product in this case)"""
    return np.dot(x, w)


def _partial_derivative_mse(x, y, w):
    """Partial derivatives of the squared error for one data point

    ``x`` is a single row of features and ``y`` its target value.
    d/dw_j (y - x.w)^2 = -2 * x_j * (y - x.w)
    """
    residual = y - _activation(x, w)
    return -2 * x * residual


def _train_epoch(x, y, w, lr):
    """Train for one epoch using batch gradient descent"""
    gradient_w = np.zeros(x.shape[1])
    for i in range(len(x)):
        gradient_w += _partial_derivative_mse(x[i], y[i], w)
    # Average over the data points so the step matches the *mean*
    # squared error rather than the summed error
    gradient_w /= len(x)
    return w - lr * gradient_w


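# A minimal alternative sketch (not from the original paste): the same
# averaged gradient as a single matrix expression, which is the idiomatic
# numpy form of the per-example loop above.
def _train_epoch_vectorised(x, y, w, lr):
    """Equivalent of _train_epoch: gradient of MSE is -2/n * X^T (y - Xw)"""
    gradient_w = -2 * x.T.dot(y - _activation(x, w)) / len(x)
    return w - lr * gradient_w

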
def _initialise_weights(n):
    """Initialise weights uniformly at random from [0, 1)"""
    return np.random.rand(n)


def _append_bias(x):
    """Append 1 to each data point (the bias/intercept term)"""
    return np.hstack((x, np.ones((x.shape[0], 1))))
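

# A minimal usage sketch (an assumption, not part of the original paste):
# fits the model on synthetic data with a known linear relationship, then
# checks the learned weights against numpy's closed-form least-squares
# solution. The shapes and hyperparameters here are illustrative only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x = rng.rand(100, 2)                 # 100 data points, 2 features
    true_w = np.array([3.0, -1.5])
    y = x.dot(true_w) + 0.5              # intercept of 0.5, no noise

    model = LinearRegression()
    model.fit(x, y, lr=0.1, epochs=1000)
    print("Predictions:", model.predict(x[:5]))

    # Closed-form check: solve min ||Xw - y||^2 directly
    x_b = _append_bias(x)
    w_exact, _, _, _ = np.linalg.lstsq(x_b, y, rcond=None)
    print("Closed-form weights:", w_exact)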