Untitled | a guest | Feb 14th, 2017 | Python | 1.27 KB
import torch
from torch.autograd import Variable
import torch.nn as nn
import numpy as np

# Batch size, input dim, hidden dim, output dim
N, D_in, H, D_out = 64, 1000, 100, 10
# num_class is not defined in the original paste; it is assumed to be 10 here so the snippet runs
num_class = 10
dtype = torch.FloatTensor

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Weight matrices kept as plain Variables rather than nn.Parameter objects,
        # so parameters() is overridden below to expose them to the optimizer
        self.linear1 = Variable(torch.randn(D_in, H).type(dtype), requires_grad=True)
        self.linear2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)

    def forward(self, x):
        x = x.mm(self.linear1)  # Linear layer
        x = x.clamp(min=0)      # ReLU
        x = x.mm(self.linear2)  # Linear layer
        return x

    def parameters(self):
        return [self.linear1, self.linear2]


# Loss function: sum of squared errors
def MSELoss(y, y_pred):
    loss = (y_pred - y).pow(2).sum()
    return loss


# Input
x = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)
# Target
y = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)
# Center variable (created with requires_grad=True but not used in the loss and not passed to the optimizer below)
center = Variable(torch.randn(num_class, D_out).type(dtype), requires_grad=True)
# Network
net = Net()
# Optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)

# Forward pass
out = net(x)
loss = MSELoss(y, out)

# Compute gradients and take one Adam step
optimizer.zero_grad()
loss.backward()
optimizer.step()
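
The paste ends after a single forward pass, backward pass, and optimizer step. A minimal sketch of how those pieces would typically be repeated as a training loop is shown below; the iteration count and the periodic loss printout are illustrative assumptions, not part of the original paste.

# Minimal training-loop sketch (not in the original paste): repeat the forward
# pass, gradient computation, and Adam update. 500 iterations is an arbitrary
# choice for illustration.
for t in range(500):
    out = net(x)
    loss = MSELoss(y, out)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if t % 100 == 0:
        print(t, loss.data)  # .data keeps this compatible with the 2017-era Variable API used above

One design point worth noting: because linear1 and linear2 are plain Variables rather than nn.Parameter objects, nn.Module does not register them automatically; the overridden parameters() method is what makes them visible to the Adam optimizer. The center Variable would likewise need to be added to that list (or passed to the optimizer separately) before any gradient flowing into it could update it.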