SHARE
TWEET

Untitled

a guest Jul 24th, 2019 55 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. from __future__ import print_function
  2.  
  3. import argparse
  4. import torch.backends.cudnn as cudnn
  5. import torch.nn.functional as F
  6. import torch.optim as optim
  7. import torch.utils.data.distributed
  8. from torchvision import models
  9. import horovod.torch as hvd
  10. import timeit
  11. import numpy as np
  12. # Apex
  13. from apex import amp
  14.  
  15. # Benchmark settings
  16. parser = argparse.ArgumentParser(description='PyTorch Synthetic Benchmark',
  17.                                  formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  18. parser.add_argument('--fp16-allreduce', action='store_true', default=False,
  19.                     help='use fp16 compression during allreduce')
  20.  
  21. parser.add_argument('--model', type=str, default='resnet50',
  22.                     help='model to benchmark')
  23. parser.add_argument('--batch-size', type=int, default=32,
  24.                     help='input batch size')
  25.  
  26. parser.add_argument('--num-warmup-batches', type=int, default=10,
  27.                     help='number of warm-up batches that don\'t count towards benchmark')
  28. parser.add_argument('--num-batches-per-iter', type=int, default=10,
  29.                     help='number of batches per benchmark iteration')
  30. parser.add_argument('--num-iters', type=int, default=10,
  31.                     help='number of benchmark iterations')
  32.  
  33. parser.add_argument('--no-cuda', action='store_true', default=False,
  34.                     help='disables CUDA training')
  35.  
  36. args = parser.parse_args()
  37. args.cuda = not args.no_cuda and torch.cuda.is_available()
  38.  
# Initialize Horovod before any collective ops or rank queries.
hvd.init()

if args.cuda:
    # Horovod: pin GPU to local rank.
    torch.cuda.set_device(hvd.local_rank())

# Let cuDNN autotune convolution algorithms for the fixed input shape.
cudnn.benchmark = True
  47. # Set up standard model.
  48. model = getattr(models, args.model)()
  49.  
  50. if args.cuda:
  51.     # Move model to GPU.
  52.     model.cuda()
  53.  
  54. optimizer = optim.SGD(model.parameters(), lr=0.01)
  55.  
  56. # Horovod: (optional) compression algorithm.
  57. compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none
  58.  
  59. # Horovod: wrap optimizer with DistributedOptimizer.
  60. optimizer = hvd.DistributedOptimizer(optimizer,
  61.                                      named_parameters=model.named_parameters(),
  62.                                      compression=compression)
  63.  
  64. # Horovod: broadcast parameters & optimizer state.
  65. hvd.broadcast_parameters(model.state_dict(), root_rank=0)
  66. hvd.broadcast_optimizer_state(optimizer, root_rank=0)
  67.  
  68. # Apex
  69. model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
  70.  
  71. # Set up fixed fake data
  72. data = torch.randn(args.batch_size, 3, 224, 224)
  73. target = torch.LongTensor(args.batch_size).random_() % 1000
  74. if args.cuda:
  75.     data, target = data.cuda(), target.cuda()
  76.  
  77.  
def benchmark_step():
    """Run one forward/backward/update step on the fixed fake batch.

    Uses the module-level ``model``, ``optimizer``, ``data`` and ``target``.
    """
    optimizer.zero_grad()
    output = model(data)
    loss = F.cross_entropy(output, target)
    # Apex: backprop through the loss scaled for mixed precision.
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
        # Horovod: complete the asynchronous gradient allreduce here, inside
        # the scale_loss context, so AMP can unscale the reduced gradients.
        optimizer.synchronize()
    # Step without a second (implicit) synchronize inside optimizer.step().
    with optimizer.skip_synchronize():
        optimizer.step()
  89.  
  90. def log(s, nl=True):
  91.     if hvd.rank() != 0:
  92.         return
  93.     print(s, end='\n' if nl else '')
  94.  
  95.  
  96. log('Model: %s' % args.model)
  97. log('Batch size: %d' % args.batch_size)
  98. device = 'GPU' if args.cuda else 'CPU'
  99. log('Number of %ss: %d' % (device, hvd.size()))
  100.  
  101. # Warm-up
  102. log('Running warmup...')
  103. timeit.timeit(benchmark_step, number=args.num_warmup_batches)
  104.  
  105. # Benchmark
  106. log('Running benchmark...')
  107. img_secs = []
  108. for x in range(args.num_iters):
  109.     time = timeit.timeit(benchmark_step, number=args.num_batches_per_iter)
  110.     img_sec = args.batch_size * args.num_batches_per_iter / time
  111.     log('Iter #%d: %.1f img/sec per %s' % (x, img_sec, device))
  112.     img_secs.append(img_sec)
  113.  
  114. # Results
  115. img_sec_mean = np.mean(img_secs)
  116. img_sec_conf = 1.96 * np.std(img_secs)
  117. log('Img/sec per %s: %.1f +-%.1f' % (device, img_sec_mean, img_sec_conf))
  118. log('Total img/sec on %d %s(s): %.1f +-%.1f' %
  119.     (hvd.size(), device, hvd.size() * img_sec_mean, hvd.size() * img_sec_conf))
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
 
Top