import torch
import time
from torch.autograd import Variable
import numpy as np
- """
- This program gets slower and slower by time. No noticeable change in memory
- consumption seen on nvidia-smi.
- """
usecuda = 1
dtype = torch.FloatTensor
N = 25
if usecuda:
    N = 100 * N
    dtype = torch.cuda.FloatTensor

xdim = (100, 50, 2)
x0 = torch.randn(*xdim).type(dtype)
z0 = torch.randn(N, xdim[0]).type(dtype)
x = Variable(x0, requires_grad=True)   # parameters updated by gradient steps
z = Variable(z0, requires_grad=False)  # auxiliary state updated in-place
def updateZ(z, b):
    # b is computed from x and so carries autograd history; mixing it into z
    # here is the likely cause of the slowdown, since z then drags an
    # ever-growing graph into every subsequent iteration's loss.
    z -= 0.1 * b.lt(0.1).sum(2).type(dtype) + 0.01
    z *= 0.999
    return z
t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
tsum = 0
tsump = 1
i = 0
while True:
    # Euclidean norms of the trailing 2-vectors of x, replicated N times.
    b = x.repeat(N, 1, 1, 1).view(N, -1, 2).pow(2).sum(2).sqrt()
    c = z.view(N, xdim[0], 1).expand(N, xdim[0], xdim[1]) \
        * (1 / b.view(N, xdim[0], xdim[1]).pow(0.5))
    loss = c.sum()
    loss.backward()
    z = updateZ(z, b.view(N, -1, xdim[1]))
    x.data -= 1e-10 * x.grad.data  # tiny manual SGD step
    x.grad.data.zero_()
    t1 = time.perf_counter()
    tsum += t1 - t0
    if i % 100 == 99:
        # Time for the last 100 iterations and % change vs. the previous 100.
        print("%.4f +%.1f%%" % (tsum, tsum * 100 / tsump - 100))
        tsump = tsum
        tsum = 0
    i += 1
    t0 = t1
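
# If the slowdown is indeed the autograd graph chaining through z, a minimal
# fix (a sketch, not a confirmed diagnosis) is to cut b out of the graph
# before the in-place update, so each backward pass only ever sees the
# current iteration's graph. Against the same old Variable API as above:
#
# def updateZ(z, b):
#     b = b.data  # raw tensor, no autograd history; modern code: b.detach()
#     z.data -= 0.1 * b.lt(0.1).sum(2).type(dtype) + 0.01
#     z.data *= 0.999
#     return z
#
# In modern PyTorch the same effect is usually written with torch.no_grad():
#
# def updateZ(z, b):
#     with torch.no_grad():
#         z -= 0.1 * (b < 0.1).sum(2).to(z.dtype) + 0.01
#         z *= 0.999
#     return z
#
# This keeps memory flat on the GPU either way; a growing graph mostly costs
# host-side bookkeeping, which would match seeing no change in nvidia-smi.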