import torch
import torchvision
import torchvision.transforms as transforms

# Normalize CIFAR-10 images from [0, 1] to [-1, 1] per channel.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
import torch.nn as nn
import torch.nn.functional as F

def make_sparse(in_dim, out_dim, size, mode):
    """Build a binary connectivity mask of shape (out_dim, in_dim) with
    `size` connections per unit along the compressed dimension."""
    # Always compress the more compressible (larger) dimension.
    invert = False
    if out_dim > in_dim:
        invert = True
        out_dim, in_dim = in_dim, out_dim
    mask = torch.zeros(out_dim, in_dim)
    assert mode in ['Expander', 'Group']
    if mode == 'Expander':
        # Sample 20 random masks and keep the one whose per-input fan-out
        # is most balanced (lowest std of the column sums).
        minstd = -1
        for _ in range(20):
            m = torch.zeros(out_dim, in_dim)
            for i in range(out_dim):
                x = torch.randperm(in_dim)
                m[i][x[:size]] = 1
            std = m.sum(dim=0).std()
            if minstd == -1 or minstd > std:
                minstd = std
                mask = m
    elif mode == 'Group':
        # Block-diagonal mask: unit i sees input j iff they share a group.
        assert in_dim % size == 0 and out_dim % size == 0
        for i in range(out_dim):
            for j in range(in_dim):
                if i // size == j // size:
                    mask[i][j] = 1
    if invert:
        mask = mask.t()
    return mask
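# A minimal sanity check (my addition, not in the original paste): in
# 'Expander' mode every output unit keeps exactly `size` inputs, and when
# out_dim > in_dim the dims are swapped internally and the mask transposed
# back, so the returned shape is always (out_dim, in_dim).
_demo = make_sparse(in_dim=16, out_dim=8, size=4, mode='Expander')
assert _demo.shape == (8, 16)
assert (_demo.sum(dim=1) == 4).all()  # each row keeps exactly 4 connections
assert make_sparse(in_dim=8, out_dim=16, size=4, mode='Expander').shape == (16, 8)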
class SparseConv2d(torch.nn.Module):
    """Conv2d whose weight is stored as a sparse tensor; the forward pass
    is implemented as unfold -> sparse matmul -> reshape."""
    def __init__(self, inWCin, inWCout, kernel_size, stride=1, padding=0,
                 dilation=1, sparse_size=0, sparse_mode='Expander'):
        super(SparseConv2d, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.out_channels = inWCout
        # Channel-level mask, broadcast over all kernel_size x kernel_size taps.
        mask = make_sparse(in_dim=inWCin, out_dim=inWCout, size=sparse_size, mode=sparse_mode)
        weight = torch.zeros((inWCout, inWCin))
        weight = 0.01 * torch.nn.init.kaiming_normal_(weight)
        weight = torch.mul(weight, mask)
        weight = weight.unsqueeze(2).unsqueeze(3).repeat(1, 1, kernel_size, kernel_size)
        # Flatten to (out_channels, in_channels * k * k) to match unfold's layout.
        weight = weight.view(weight.size(0), -1)
        weight = weight.to_sparse().cuda()
        self.weight = torch.nn.Parameter(weight, requires_grad=True)

    def forward(self, x):
        # Standard conv output size formula (square input and kernel assumed).
        out = (x.size(2) + 2 * self.padding - self.dilation * (self.kernel_size - 1) - 1) // self.stride + 1
        # Pass stride/padding/dilation through to unfold; the paste dropped
        # them, which silently broke any non-default configuration.
        x_unf = torch.nn.functional.unfold(x, (self.kernel_size, self.kernel_size),
                                           dilation=self.dilation, padding=self.padding,
                                           stride=self.stride).transpose(1, 2)
        x_unf = torch.sparse.mm(self.weight, x_unf.reshape(-1, x_unf.size(2)).t()).t().reshape(x.size(0), -1, self.out_channels).transpose(1, 2)
        x_unf = x_unf.view(x_unf.size(0), x_unf.size(1), out, out)
        return x_unf
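# Hedged shape check (my addition; the numbers are illustrative, not from the
# original paste): since SparseConv2d is just unfold + sparse matmul, its
# output shape should match an equivalent dense nn.Conv2d. Requires CUDA,
# like the rest of this script.
_sc = SparseConv2d(6, 16, 5, sparse_size=3)
_x = torch.randn(2, 6, 14, 14).cuda()
assert _sc(_x).shape == nn.Conv2d(6, 16, 5, bias=False).cuda()(_x).shape == (2, 16, 10, 10)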
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5, bias=False)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5, bias=False)
        self.fc1 = nn.Conv2d(16, 120, 5, bias=False)
        self.fc2 = nn.Conv2d(120, 84, 1, bias=False)
        self.fc3 = nn.Conv2d(84, 10, 1, bias=False)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        x = x.view(-1, 10)
        return x
class Net2(nn.Module):
    """Same topology as Net, but the middle layers use sparse channel
    connectivity (sparse_size = connections kept per output channel)."""
    def __init__(self):
        super(Net2, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5, bias=False)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = SparseConv2d(6, 16, 5, sparse_size=12)
        self.fc1 = SparseConv2d(16, 120, 5, sparse_size=80)
        self.fc2 = SparseConv2d(120, 84, 1, sparse_size=64)
        self.fc3 = nn.Conv2d(84, 10, 1, bias=False)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        x = x.view(-1, 10)
        return x
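# Hedged helper (my addition; the name is my own): count retained weights so
# the dense baseline and the sparse variant can be compared. SparseConv2d
# stores its weight as a sparse tensor, so _nnz() reports the kept
# connections directly.
def count_nonzero_weights(model):
    total = 0
    for p in model.parameters():
        total += p._nnz() if p.is_sparse else int((p != 0).sum().item())
    return total

# e.g. print(count_nonzero_weights(Net()), count_nonzero_weights(Net2()))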
#net = Net().cuda()
net = Net2().cuda()

import torch.optim as optim

criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        inputs, labels = inputs.cuda(), labels.cuda()

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('Finished Training')
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        images, labels = images.cuda(), labels.cuda()
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))