import torch
from torch.autograd import Variable
import torch.nn as nn
import numpy as np

# Batch size, input dim, hidden dim, classifier output dim, number of classes
# (D_out must be >= num_class for CrossEntropyLoss to accept the labels)
N, D_in, H, D_out, num_class = 64, 1000, 100, 10, 4
dtype = torch.FloatTensor
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        # ReLU via clamp; return both the logits and the hidden features
        h_relu = self.linear1(x).clamp(min=0)
        y_pred = self.linear2(h_relu)
        return y_pred, h_relu
# Center loss: mean squared distance between each sample's features and
# the center of its class (with a factor of 1/2)
def CenterLoss(y, features, centers):
    centers_pred = centers.index_select(0, y.long())  # center of each sample's class
    difference = features - centers_pred
    loss = difference.pow(2).sum() / (2 * y.size(0))
    return loss
# Input
x = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)
# Labels in [0, num_class); np.random.randint's upper bound is exclusive
y = Variable(torch.Tensor(np.random.randint(0, num_class, size=N)).type(dtype), requires_grad=False)
# Class centers, learned jointly with the network
center = Variable(torch.randn(num_class, H).type(dtype), requires_grad=True)
# Network
net = Net()
# Classification criterion
criterion = nn.CrossEntropyLoss()
# One optimizer updates both the network parameters and the centers
optimizer = torch.optim.Adam([
    {'params': net.parameters()},
    {'params': [center]}
], lr=1e-2)
# Forward pass: logits for cross-entropy, hidden features for the center loss
y_pred, features = net(x)
loss = CenterLoss(y, features, center) + criterion(y_pred, y.long())

# Compute gradients and take one optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
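
# The paste performs a single forward/backward pass. A minimal training-loop
# sketch built around the same objects is shown below; num_steps and the
# logging interval are assumptions, not part of the original snippet.
num_steps = 200
for step in range(num_steps):
    y_pred, features = net(x)
    loss = CenterLoss(y, features, center) + criterion(y_pred, y.long())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if step % 50 == 0:
        print('step %d  loss %.4f' % (step, loss.item()))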