import numpy as np

def train(x, y, learning_rate=1e-3, reg=1e-5, num_iters=100, batch_size=200, verbose=False):

    num_train, dim = x.shape
    ## assuming there is at least one example of class (k - 1) when there are k classes labelled 0 to (k - 1)
    num_classes = np.max(y) + 1

    ## initializing the weights with small random values
    W = 0.001 * np.random.randn(dim, num_classes)

    loss_history = []

    for it in range(num_iters):
        ## sampling some random indices (with replacement) for the mini-batch
        sample_indices = np.random.choice(num_train, batch_size)

        ## creating the batches
        x_batch = x[sample_indices]
        y_batch = y[sample_indices]

        ## evaluating loss and gradient on the mini-batch
        loss, grad = softmax_loss_vectorized(W, x_batch, y_batch, reg)
        loss_history.append(loss)

        ## updating the weights with a gradient-descent step
        W = W - learning_rate * grad

        ## printing progress if verbose is True
        if verbose and it % 100 == 0:
            print('Iteration {}/{} : Loss {}'.format(it, num_iters, loss))

    return loss_history, W
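
## The paste relies on a softmax_loss_vectorized function that is not shown above.
## Below is a minimal sketch of such a function, assuming x has shape (N, D),
## y holds integer class labels, and W has shape (D, C); the original
## implementation may differ in its regularization convention.
def softmax_loss_vectorized(W, x, y, reg):
    num_train = x.shape[0]

    ## class scores for every example, shape (N, C)
    scores = x.dot(W)
    ## shift scores for numerical stability before exponentiating
    scores -= np.max(scores, axis=1, keepdims=True)

    ## softmax probabilities
    exp_scores = np.exp(scores)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

    ## average cross-entropy loss plus L2 regularization
    loss = -np.sum(np.log(probs[np.arange(num_train), y])) / num_train
    loss += reg * np.sum(W * W)

    ## gradient of the loss with respect to W
    dscores = probs.copy()
    dscores[np.arange(num_train), y] -= 1
    grad = x.T.dot(dscores) / num_train + 2 * reg * W

    return loss, grad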

## calling the train function
loss_hist, new_W = train(x_train, y_train, learning_rate=1e-7, reg=2.5e4, num_iters=1500, verbose=True)

## keeping the trained weights in W
W = new_W
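
## A quick way to sanity-check the result, assuming x_test and y_test exist
## with the same layout as x_train and y_train (not part of the original paste):
y_pred = np.argmax(x_test.dot(W), axis=1)
print('test accuracy: {}'.format(np.mean(y_pred == y_test)))

## loss_hist can also be plotted to confirm the loss is decreasing
import matplotlib.pyplot as plt
plt.plot(loss_hist)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.show()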