MNIST MXNet DataIter
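
A CNN trained on MNIST with MXNet Gluon: batches come from the MNIST DataIter returned by get_mnist_iterator, and each batch is split across two GPUs with split_and_load for data-parallel training.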

import mxnet as mx
import numpy as np
import random
import time

from mxnet import autograd as ag
from mxnet.io import NDArrayIter
from mxnet.metric import Accuracy
from mxnet.optimizer import Adam
from mxnet.test_utils import get_mnist_iterator
from mxnet.gluon import Block, Trainer
from mxnet.gluon.loss import SoftmaxCrossEntropyLoss
from mxnet.gluon.nn import Conv2D, Dense, Dropout, Flatten, MaxPool2D, HybridBlock
from mxnet.gluon.utils import split_and_load


BATCH_SIZE_PER_REPLICA = 512
GPU_COUNT = 2
BATCH_SIZE = BATCH_SIZE_PER_REPLICA * GPU_COUNT
NUM_CLASSES = 10
EPOCHS = 10


class Model(HybridBlock):
    def __init__(self, **kwargs):
        super(Model, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = Conv2D(32, (3, 3))
            self.conv2 = Conv2D(64, (3, 3))
            self.pool = MaxPool2D(pool_size=(2, 2))
            self.dropout1 = Dropout(0.25)
            self.flatten = Flatten()
            self.dense1 = Dense(128)
            self.dropout2 = Dropout(0.5)
            self.dense2 = Dense(NUM_CLASSES)

    def hybrid_forward(self, F, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = self.dropout1(x)
        x = self.flatten(x)
        x = F.relu(self.dense1(x))
        x = self.dropout2(x)
        x = self.dense2(x)
        return x


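# Shape walkthrough for the model above (assuming Gluon defaults: Conv2D with
# stride 1 and no padding, MaxPool2D with strides equal to pool_size), for a
# single (1, 28, 28) MNIST image:
#   conv1   -> (32, 26, 26)
#   conv2   -> (64, 24, 24)
#   pool    -> (64, 12, 12)
#   flatten -> 9216 features
#   dense1  -> 128, dense2 -> NUM_CLASSES logits
# The same shapes can also be checked with Gluon's Block.summary on an
# initialized copy of the model before hybridizing it.
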
mx.random.seed(42)
random.seed(42)

# get data
input_shape = (1, 28, 28)
train_data, test_data = get_mnist_iterator(input_shape=input_shape,
                                           batch_size=BATCH_SIZE)

# build model
model = Model()
# hybridize for speed
model.hybridize(static_alloc=True, static_shape=True)

# pin GPUs
ctx = [mx.gpu(i) for i in range(GPU_COUNT)]

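# Optional fallback sketch (assumes mx.context.num_gpus() is available,
# MXNet >= 1.3): if fewer than GPU_COUNT GPUs are present, run on CPU so the
# script still executes end to end.
if mx.context.num_gpus() < GPU_COUNT:
    ctx = [mx.cpu()]
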
# optimizer
opt_params = {'learning_rate': 0.001, 'beta1': 0.9, 'beta2': 0.999, 'epsilon': 1e-08}
opt = mx.optimizer.create('adam', **opt_params)
# initialize parameters
model.initialize(force_reinit=True, ctx=ctx)
# fetch and broadcast parameters
params = model.collect_params()
# trainer
trainer = Trainer(params=params,
                  optimizer=opt,
                  kvstore='device')
# loss function
loss_fn = SoftmaxCrossEntropyLoss()
# use accuracy as the evaluation metric
metric = Accuracy()

start = time.perf_counter()
for epoch in range(1, EPOCHS+1):
    # reset the train data iterator.
    train_data.reset()
    # loop over the train data iterator
    for i, batch in enumerate(train_data):
        if i == 0:
            tick_0 = time.time()
        # split train data into multiple slices along batch_axis
        # and copy each slice into a context
        data = split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        # split train labels into multiple slices along batch_axis
        # and copy each slice into a context
        label = split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        outputs = []
        losses = []
        # inside training scope
        with ag.record():
            for x, y in zip(data, label):
                z = model(x)
                # compute softmax cross entropy loss
                l = loss_fn(z, y)
                outputs.append(z)
                losses.append(l)
        # backpropagate the error for one iteration
        for l in losses:
            l.backward()
        # make one step of parameter update.
        # trainer needs to know the batch size of data
        # to normalize the gradient by 1/batch_size
        trainer.step(BATCH_SIZE)
        # update internal evaluation
        metric.update(label, outputs)
    str1 = 'Epoch [{}], Accuracy {:.4f}'.format(epoch, metric.get()[1])
    str2 = '~Samples/Sec {:.4f}'.format(BATCH_SIZE*(i+1)/(time.time()-tick_0))
    print('%s  %s' % (str1, str2))
    # reset evaluation result to initial state.
    metric.reset()

elapsed = time.perf_counter() - start
print('elapsed: {:0.3f}'.format(elapsed))

# evaluate on the test data with a fresh Accuracy metric
metric = Accuracy()
for batch in test_data:
    data = split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
    label = split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
    outputs = []
    for x in data:
        outputs.append(model(x))
    metric.update(label, outputs)
print('validation %s=%f' % metric.get())
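
# A minimal follow-up sketch using Gluon's standard serialization API: persist
# the trained weights, and export a symbol + params pair (valid here because
# the block was hybridized and has completed forward passes). The 'mnist_cnn'
# file prefix is just a placeholder.
model.save_parameters('mnist_cnn.params')
model.export('mnist_cnn')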