TwoLayerNet example

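This paste assumes a TwoLayerNet class in the style of a CS231n-type assignment: an affine - ReLU - affine - softmax network with parameters 'w1', 'b1', 'w2', 'b2', a compute_loss_and_gradients(x, y=None, reg=...) method that returns raw scores when y is None and (loss, grads) otherwise, and a predict(x) method. The class itself is not part of the paste, so what follows is only a minimal stand-in sketch under those assumptions; the reference loss and scores further down come from the original implementation, and an independent re-implementation may not match them to the last digit.

import numpy as np

class TwoLayerNet:
    def __init__(self, input_size, hidden_size, output_size, reg=0.0, std=1e-4):
        self.reg = reg
        self.params = {
            'w1': std * np.random.randn(input_size, hidden_size),
            'b1': np.zeros(hidden_size),
            'w2': std * np.random.randn(hidden_size, output_size),
            'b2': np.zeros(output_size),
        }

    def compute_loss_and_gradients(self, x, y=None, reg=None):
        """Return scores if y is None, else (loss, grads)."""
        reg = self.reg if reg is None else reg
        w1, b1 = self.params['w1'], self.params['b1']
        w2, b2 = self.params['w2'], self.params['b2']

        # Forward pass: affine -> ReLU -> affine.
        h = np.maximum(0, x.dot(w1) + b1)
        scores = h.dot(w2) + b2
        if y is None:
            return scores

        # Softmax loss with L2 regularization (0.5 * reg * ||w||^2 convention).
        shifted = scores - scores.max(axis=1, keepdims=True)
        probs = np.exp(shifted)
        probs /= probs.sum(axis=1, keepdims=True)
        n = x.shape[0]
        loss = -np.log(probs[np.arange(n), y]).mean()
        loss += 0.5 * reg * (np.sum(w1 * w1) + np.sum(w2 * w2))

        # Backward pass through softmax, second affine, ReLU, first affine.
        dscores = probs.copy()
        dscores[np.arange(n), y] -= 1
        dscores /= n
        grads = {
            'w2': h.T.dot(dscores) + reg * w2,
            'b2': dscores.sum(axis=0),
        }
        dh = dscores.dot(w2.T)
        dh[h <= 0] = 0  # ReLU passes gradient only where the unit was active
        grads['w1'] = x.T.dot(dh) + reg * w1
        grads['b1'] = dh.sum(axis=0)
        return loss, grads

    def predict(self, x):
        return np.argmax(self.compute_loss_and_gradients(x), axis=1)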
import numpy as np
import matplotlib.pyplot as plt


def small_test():
    """Build a tiny toy dataset and a toy TwoLayerNet for sanity checks."""
    num_inputs = 5
    input_size = 4
    hidden_size = 10
    output_size = 3

    np.random.seed(0)
    model = TwoLayerNet(input_size=input_size, hidden_size=hidden_size,
                        output_size=output_size, reg=0.0, std=1e-1)

    np.random.seed(1)
    x = 10 * np.random.randn(num_inputs, input_size)
    y = np.array([0, 1, 2, 2, 1])

    return x, y, model

x, y, model = small_test()

# With y=None the model returns the raw class scores instead of (loss, grads).
scores = model.compute_loss_and_gradients(x, None)
loss, grads = model.compute_loss_and_gradients(x, y, reg=0.05)

# Reference values for these seeds.
correct_loss = 1.30378789133
correct_scores = np.asarray([
    [-0.81233741, -1.27654624, -0.70335995],
    [-0.17129677, -1.18803311, -0.47310444],
    [-0.51590475, -1.01354314, -0.8504215 ],
    [-0.15419291, -0.48629638, -0.52901952],
    [-0.00618733, -0.12435261, -0.15226949]])
  29. # print("model.w1 =", model.params['w1'])
  30. # print("model.w2 =", model.params['w2'])
  31. print("correct_loss - loss =", np.sum(np.abs(correct_loss - loss)))
  32. print("scores - correct_scores =", np.sum(np.abs(scores - correct_scores)))
  33. print("xy =", np.sum(x), np.sum(y))
  34. print("net.params =", {k.lower(): np.sum(v) for k, v in model.params.items()})
  35. print("net.grads =", {k.lower(): np.sum(v) for k, v in grads.items()})
  36. # print("params = ", {k: v.shape for k, v in params.items()})
  37. # print("grads = ", {k: v.shape for k, v in grads.items()})

def small_train(model, x, y, batch_size=200):
    """Train the toy model with vanilla SGD and record loss/accuracy."""
    # The toy set is so small that it doubles as the validation set.
    x_val = x
    y_val = y

    learning_rate = 0.1
    learning_rate_decay = 0.95
    num_iters = 200
    reg = 0.005
    num_train = x.shape[0]
    iterations_per_epoch = max(num_train // batch_size, 1)

    loss_history = []
    train_acc_history = []
    val_acc_history = []

    for it in range(num_iters):
        # Sample with replacement: batch_size may exceed num_train here.
        idx = np.random.choice(num_train, batch_size, replace=True)
        x_batch = x[idx]
        y_batch = y[idx]

        loss, grads = model.compute_loss_and_gradients(x_batch, y=y_batch, reg=reg)
        loss_history.append(loss)

        # Vanilla SGD step on every parameter.
        for p, g in grads.items():
            model.params[p] -= learning_rate * g

        if it % 100 == 0:
            print('iteration %d / %d: loss %f' % (it, num_iters, loss))

        # Once per epoch: record accuracies and decay the learning rate.
        if it % iterations_per_epoch == 0:
            train_acc = (model.predict(x_batch) == y_batch).mean()
            val_acc = (model.predict(x_val) == y_val).mean()
            train_acc_history.append(train_acc)
            val_acc_history.append(val_acc)
            learning_rate *= learning_rate_decay

    return {
        'loss_history': loss_history,
        'train_acc_history': train_acc_history,
        'val_acc_history': val_acc_history,
    }

stats = small_train(model, x, y, batch_size=200)

print('Final training loss:', stats['loss_history'][-1])

# Plot the loss history.
plt.plot(stats['loss_history'])
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.title('Training Loss history')
plt.show()
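
small_train also records per-epoch accuracies, so the run can be inspected a little further. A short follow-up plot (not part of the original paste) might look like this:

# On this five-point toy set both curves should rise toward 1.0 quickly.
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Classification accuracy history')
plt.legend()
plt.show()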