Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import util
- import numpy as np
- import matplotlib.pyplot as plt
# Fail loudly on floating-point errors (overflow, invalid ops) instead of
# silently emitting warnings — useful when high-degree polynomials blow up.
np.seterr(all='raise')
# Half-width, in multiples of pi, of the x-axis range used for plotting.
factor = 2.0
class LinearModel(object):
    """Base class for linear models fit via the normal equations."""

    def __init__(self, beta=0):
        """
        Args:
            beta: Regularization coefficient for the model. (Not used by
                fit() here; presumably consumed by regularizing variants.)
        """
        self.theta = None  # weight vector, set by fit()
        self.beta = beta

    def fit(self, x, y):
        """Fit the model by solving the normal equations for self.theta.

        Solves (x^T x) theta = x^T y with np.linalg.solve, which is more
        numerically stable than forming the explicit inverse.

        Args:
            x: Training example inputs. Shape (n, d).
            y: Training example labels. Shape (n,).
        """
        self.theta = np.linalg.solve(x.T @ x, x.T @ y)

    def create_poly(self, k, x):
        """Generate a polynomial feature map from the data x.

        The map contains powers 0 to k of column 1 of x (column 0 is
        assumed to be the intercept added by the dataset loader).

        Args:
            k: Highest polynomial degree.
            x: Training example inputs. Shape (n, 2).

        Returns:
            numpy array of shape (n, k + 1).
        """
        n = x.shape[0]
        output = np.zeros([n, k + 1])
        for i in range(k + 1):
            output[:, i] = np.power(x[:, 1], i)
        return output

    def create_sin(self, k, x):
        """Append a sin feature column to the data x.

        The sine is taken of column 1 of x, which holds the raw feature
        values (degree-1 term) after create_poly.

        Args:
            k: Unused; kept for signature symmetry with create_poly.
            x: Training example inputs. Shape (m, p).

        Returns:
            numpy array of shape (m, p + 1); last column is sin(x[:, 1]).
        """
        m, p = x.shape
        output = np.zeros([m, p + 1])
        output[:, :-1] = x
        output[:, -1] = np.sin(x[:, 1])
        return output

    def predict(self, x):
        """Make a prediction given new inputs x.

        Args:
            x: Inputs of shape (n, d).

        Returns:
            Predictions of shape (n,).
        """
        return x @ self.theta

    def mse(self, y_true, y_pred):
        """Return the mean squared error of the predictions.

        Args:
            y_true: True labels of shape (n,).
            y_pred: Predicted labels of shape (n,).

        Returns:
            Scalar mean squared error.
        """
        return np.square(y_true - y_pred).mean()
def run_exp(train_path, sine=False, ks=(1, 2, 3, 5, 10, 20), filename='plot.png'):
    """Train a model for each degree in ks and plot its predictions.

    For each k, fits a degree-k polynomial model (optionally augmented
    with a sine feature) on the training data and plots its predictions
    over a dense grid, together with the training points.

    Args:
        train_path: Path to the training dataset.
        sine: If True, append a sin(x) feature to the polynomial map.
        ks: Iterable of polynomial degrees to try. (Tuple default avoids
            the mutable-default-argument pitfall.)
        filename: Output path for the saved plot.
    """
    train_x, train_y = util.load_dataset(train_path, add_intercept=True)
    # Dense grid over [-factor*pi, factor*pi] with an intercept column,
    # matching the layout produced by load_dataset.
    plot_x = np.ones([1000, 2])
    plot_x[:, 1] = np.linspace(-factor * np.pi, factor * np.pi, 1000)
    plt.scatter(train_x[:, 1], train_y)

    for k in ks:
        # Train on the degree-k feature map. The dataset is already loaded
        # above; no need to reload it on every iteration.
        model = LinearModel()
        x_train = model.create_poly(k, train_x)
        if sine:
            x_train = model.create_sin(k, x_train)
        model.fit(x_train, train_y)

        # plot_y are the model's predictions on the plot_x grid.
        x_plot = model.create_poly(k, plot_x)
        if sine:
            x_plot = model.create_sin(k, x_plot)
        plot_y = model.predict(x_plot)

        plt.ylim(-2, 2)
        plt.plot(plot_x[:, 1], plot_y, label='k=%d' % k)

    plt.legend()
    plt.savefig(filename)
    plt.clf()
def main(train_path, small_path, eval_path):
    """Run all experiments."""
    # Experiments for the other parts are kept here, commented out, so each
    # can be re-enabled individually.
    # run_exp(train_path, ks = [3], filename = 'part_b.png')
    # run_exp(train_path, ks = [3, 5, 10, 20], filename = 'part_c.png')
    # run_exp(train_path, sine = True, ks = [1, 2, 3, 5, 10, 20], filename = 'part_d.png')
    # run_exp(small_path, sine = True, ks = [1, 2, 5, 10, 20], filename = 'part_e.png')
    # ADD ZERO LATER
    run_exp(train_path, sine=True, ks=[12], filename='20.png')
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement