Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def l2_rls_train(tr_data, tr_labels, regLambda, multiclass=False):
    """Train an L2-regularised least-squares (ridge) model in closed form.

    Appends a bias column of ones to the training data and computes the
    optimal weights by setting the gradient of the regularised
    least-squares objective to zero:

        w = (X~^T X~ + lambda * I)^(-1) X~^T y

    (or the pseudo-inverse solution when ``regLambda == 0``).

    Parameters
    ----------
    tr_data : ndarray of shape (n_samples, n_features)
        Training inputs.
    tr_labels : ndarray of shape (n_samples,)
        Training targets. When ``multiclass`` is True these are integer
        class labels in ``1..K`` (1-based, per the one-hot indexing below).
    regLambda : float
        L2 regularisation strength; 0 selects the plain least-squares fit.
    multiclass : bool, optional
        If True, labels are one-hot encoded so one weight column is
        learned per class.

    Returns
    -------
    ndarray of shape (n_features + 1,) or (n_features + 1, n_classes)
        Learned weights; row 0 corresponds to the bias term.
    """
    # Naming kept consistent with the lecture notes.
    X, y = tr_data, tr_labels
    n_samples = X.shape[0]
    # Prepend a bias column of ones.
    X_tilde = np.column_stack((np.ones(n_samples), X))
    if multiclass:
        # One-hot encode labels 1..K into an (n_samples, n_classes) matrix.
        # BUG FIX: the original allocated an (n_samples, n_samples) matrix,
        # which is only correct when n_samples happens to equal n_classes
        # and raises IndexError when a label exceeds n_samples.
        n_classes = int(np.max(tr_labels))
        one_hot = np.zeros((n_samples, n_classes))
        for i, label in enumerate(tr_labels):
            one_hot[i][label - 1] = 1
        y = one_hot
    n_cols = X_tilde.shape[1]
    if regLambda == 0:
        # Unregularised case: the pseudo-inverse also handles
        # rank-deficient X_tilde gracefully.
        w = np.linalg.pinv(X_tilde) @ y
    else:
        # Closed-form ridge solution. NOTE(review): the bias row is
        # regularised along with the feature weights, matching the
        # original implementation.
        inverse = np.linalg.inv(X_tilde.T @ X_tilde + regLambda * np.identity(n_cols))
        w = inverse @ X_tilde.T @ y
    return w
def l2_rls_predict(w, data):
    """Predict targets with a trained L2-RLS model.

    Prepends a bias column of ones to ``data`` (matching the augmentation
    done at training time) and returns the linear predictions.

    Parameters
    ----------
    w : ndarray of shape (n_features + 1,) or (n_features + 1, n_classes)
        Weights produced by ``l2_rls_train``; row 0 is the bias.
    data : ndarray of shape (n_samples, n_features)
        Inputs to score.

    Returns
    -------
    ndarray
        Predicted values, ``design_matrix @ w``.
    """
    num_rows = data.shape[0]
    # Bias-augmented design matrix: a leading column of ones.
    design = np.column_stack((np.ones(num_rows), data))
    return design @ w
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement