import numpy as np

def train(x, y, learning_rate=1e-3, reg=1e-5, num_iters=100, batch_size=200, verbose=False):
    num_train, dim = x.shape
    ## assuming labels run from 0 to (k - 1) and class (k - 1) appears at least once
    num_classes = np.max(y) + 1
    ## initializing the weights with small random values
    W = 0.001 * np.random.randn(dim, num_classes)
    loss_history = []
    for it in range(num_iters):
        ## sampling a minibatch of random indices (with replacement, which is faster)
        sample_indices = np.random.choice(num_train, batch_size)
        x_batch = x[sample_indices]
        y_batch = y[sample_indices]
        ## evaluating loss and gradient on the minibatch
        loss, grad = softmax_loss_vectorized(W, x_batch, y_batch, reg)
        loss_history.append(loss)
        ## gradient descent step on the weights
        W -= learning_rate * grad
        ## printing progress every 100 iterations if verbose is True
        if verbose and it % 100 == 0:
            print('Iteration {}/{} : Loss {}'.format(it, num_iters, loss))
    return loss_history, W
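
## softmax_loss_vectorized is assumed to be defined elsewhere in the surrounding
## assignment code (it is not in this paste). A minimal sketch of what it is
## expected to compute -- the average cross-entropy loss with L2 regularization,
## and the gradient dW -- could look like this:
def softmax_loss_vectorized(W, x, y, reg):
    num_train = x.shape[0]
    scores = x.dot(W)
    ## shifting scores for numerical stability before exponentiating
    scores -= np.max(scores, axis=1, keepdims=True)
    probs = np.exp(scores) / np.sum(np.exp(scores), axis=1, keepdims=True)
    ## average negative log-probability of the correct class, plus L2 penalty on W
    loss = -np.mean(np.log(probs[np.arange(num_train), y])) + reg * np.sum(W * W)
    ## gradient: (probabilities minus one-hot targets) backpropagated through x
    dscores = probs
    dscores[np.arange(num_train), y] -= 1
    dW = x.T.dot(dscores) / num_train + 2 * reg * W
    return loss, dW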
## calling the train function on the training data
loss_hist, new_W = train(x_train, y_train, learning_rate=1e-7, reg=2.5e4, num_iters=1500, verbose=True)
## keeping the trained weights as W
W = new_W
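
## a quick sanity check on the trained weights; x_train and y_train are assumed
## to be the preprocessed training arrays used above:
y_pred = np.argmax(x_train.dot(W), axis=1)  ## highest-scoring class per example
print('training accuracy: {}'.format(np.mean(y_pred == y_train)))

## plotting the recorded loss history -- it should trend downward over iterations
import matplotlib.pyplot as plt
plt.plot(loss_hist)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.show()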