PyTorch slowdown test

Oct 26th, 2017

import time

import torch
from torch.autograd import Variable

"""
This program gets slower and slower over time. No noticeable change in memory
consumption is seen in nvidia-smi.
"""

usecuda = 1

dtype = torch.FloatTensor
N = 25
if usecuda:
  N = 100 * N
  dtype = torch.cuda.FloatTensor

xdim = (100, 50, 2)

x0 = torch.randn(*xdim).type(dtype)
z0 = torch.randn(N, xdim[0]).type(dtype)

x = Variable(x0, requires_grad=True)   # optimized parameter
z = Variable(z0, requires_grad=False)  # auxiliary state, updated in place

def updateZ(z, b):
  # Penalize z by the number of entries of b below 0.1, then decay it.
  z -= 0.1 * b.lt(0.1).sum(2).type(dtype) + 0.01
  z *= 0.999
  return z

t0 = time.perf_counter()  # time.clock() is deprecated and removed in Python 3.8
tsum = 0
tsump = 1
i = 0
while True:
  # b: (N, 5000) Euclidean norms of the length-2 vectors in x, tiled N times
  b = x.repeat(N, 1, 1, 1).view(N, -1, 2).pow(2).sum(2).sqrt()
  c = z.view(N, xdim[0], 1).expand(N, xdim[0], xdim[1]) * (1 / b.view(N, xdim[0], xdim[1]).pow(0.5))
  loss = c.sum()
  loss.backward()
  z = updateZ(z, b.view(N, -1, xdim[1]))
  x.data -= 1e-10 * x.grad.data  # manual gradient step
  x.grad.data.zero_()
  t1 = time.perf_counter()
  tsum += t1 - t0
  if i % 100 == 99:
    # Report the time for the last 100 iterations and the relative change
    # versus the previous block of 100.
    print("%.4f +%.1f%%" % (tsum, tsum * 100 / tsump - 100))
    tsump = tsum
    tsum = 0
  i = i + 1
  t0 = t1
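
A caveat on the timing itself: CUDA kernels launch asynchronously, so a host-side clock read between iterations can attribute GPU work to whichever later call happens to block. A minimal sketch of the same per-block timing with an explicit device synchronization before each reading; step() is a hypothetical stand-in for one iteration of the loop above:

import time

import torch

def timed_iterations(step, n=100):
  # step() stands in for one iteration of the training loop above.
  total = 0.0
  for _ in range(n):
    t0 = time.perf_counter()
    step()
    if torch.cuda.is_available():
      torch.cuda.synchronize()  # wait for queued kernels to finish
    total += time.perf_counter() - t0
  return total

With the synchronization in place, a genuine per-iteration slowdown shows up as a growing total rather than as an artifact of where the host happens to block.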
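
On current PyTorch (0.4 and later), Variable is a deprecated alias for Tensor, and manual parameter updates through .data are usually written under torch.no_grad() instead. A minimal sketch of the update step in that style, assuming the same x shape and learning rate as above:

import torch

x = torch.randn(100, 50, 2, requires_grad=True)
loss = x.pow(2).sum(2).sqrt().sum()
loss.backward()

with torch.no_grad():    # keep the update out of the autograd graph
  x -= 1e-10 * x.grad    # in-place gradient step on the leaf tensor
x.grad.zero_()           # clear accumulated gradients for the next step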