import torch
from torch.autograd import Variable  # deprecated alias in modern PyTorch; tensors with requires_grad behave the same
import torch.nn as nn

# Batch size, input dimension, hidden-layer width, output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
dtype = torch.FloatTensor
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Weights are stored as raw autograd Variables instead of nn.Linear
        # modules, so nn.Module cannot register them automatically;
        # parameters() is overridden below to hand them to the optimizer.
        self.linear1 = Variable(torch.randn(D_in, H).type(dtype), requires_grad=True)
        self.linear2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)

    def forward(self, x):
        x = x.mm(self.linear1)  # linear layer
        x = x.clamp(min=0)      # ReLU
        x = x.mm(self.linear2)  # linear layer
        return x

    def parameters(self):
        return [self.linear1, self.linear2]
# Loss function: sum of squared errors (MSE without the mean reduction)
def MSELoss(y, y_pred):
    loss = (y_pred - y).pow(2).sum()
    return loss
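
# Note: this handwritten loss equals PyTorch's built-in MSE with sum
# reduction, e.g. nn.MSELoss(reduction='sum')(y_pred, y); the reduction
# keyword assumes PyTorch >= 0.4.1 (older releases used size_average=False).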
# Input
x = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)
# Target output
y = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)

# Center variable (as for a center loss); unused in the steps below.
# num_class was undefined in the original paste; 10 is assumed here so the
# snippet runs.
num_class = 10
center = Variable(torch.randn(num_class, D_out).type(dtype), requires_grad=True)
# Network
net = Net()
# Optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)
# Forward pass
out = net(x)
loss = MSELoss(y, out)

# Compute gradients and take one Adam step
optimizer.zero_grad()
loss.backward()
optimizer.step()
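
# The snippet above takes a single optimization step. A minimal training loop
# would repeat it over the same random data; the epoch count and print
# interval below are illustrative, not part of the original paste.
for epoch in range(500):
    out = net(x)
    loss = MSELoss(y, out)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % 100 == 0:
        print(epoch, loss.item())  # .item() assumes PyTorch >= 0.4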