Not a member of Pastebin yet? Sign up — it unlocks many cool features!
- import torch
- import torch.nn as nn
- import torchvision.datasets as dsets
- import torchvision.transforms as transforms
- from torch.autograd import Variable
# Hyper Parameters
num_epochs = 5          # full passes over the training set
batch_size = 100        # samples per mini-batch
learning_rate = 0.001   # Adam step size
# torch.cuda.is_available() already returns a bool, so the
# `True if ... else False` conditional was redundant.
use_cuda = torch.cuda.is_available()
# ---------------------------------------------------------------------------
# MNIST data pipeline: datasets on disk plus the batched loaders that feed
# the training and evaluation loops.
# ---------------------------------------------------------------------------
_mnist_root = r'E:\DataSets\mnist'

train_dataset = dsets.MNIST(
    root=_mnist_root,
    train=True,
    transform=transforms.ToTensor(),  # HWC uint8 image -> float tensor in [0, 1]
    download=True,                    # fetch the files on first run
)
test_dataset = dsets.MNIST(
    root=_mnist_root,
    train=False,
    transform=transforms.ToTensor(),
)

# Training batches are shuffled each epoch; evaluation order is fixed.
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=batch_size, shuffle=False)
class CNN(nn.Module):
    """Two-block convolutional classifier for 28x28 single-channel images.

    Each block is Conv(5x5, padding=2) -> BatchNorm -> ReLU -> MaxPool(2),
    halving the spatial size, followed by a single linear layer producing
    10 class logits.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # (N, 1, 28, 28) -> (N, 16, 14, 14)
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # (N, 16, 14, 14) -> (N, 32, 7, 7)
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # (N, 32 * 7 * 7) -> (N, 10) raw logits (no softmax here;
        # CrossEntropyLoss applies log-softmax itself).
        self.fc = nn.Linear(7 * 7 * 32, 10)

    def forward(self, x):
        """Map a batch of images (N, 1, 28, 28) to class logits (N, 10)."""
        features = self.layer2(self.layer1(x))
        flat = features.view(features.size(0), -1)
        return self.fc(flat)
cnn = CNN()
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
if use_cuda:
    # Move the model to the GPU BEFORE constructing the optimizer so the
    # optimizer tracks (and builds its state for) the CUDA parameters.
    # The original built the Adam optimizer first and moved the model after.
    cnn = cnn.cuda()
    criterion = criterion.cuda()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
# Train the Model
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images)
        labels = Variable(labels)
        if use_cuda:
            images = images.cuda()
            labels = labels.cuda()
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = cnn(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            # loss.item() replaces the deprecated loss.data[0], which
            # raises on PyTorch >= 0.5 (loss is a 0-dim tensor).
            # len(train_loader) is the exact number of batches per epoch.
            print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
                  % (epoch + 1, num_epochs, i + 1, len(train_loader), loss.item()))
# Test the Model
cnn.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
correct = 0
total = 0
with torch.no_grad():  # inference only: skip gradient bookkeeping
    for images, labels in test_loader:
        if use_cuda:
            images = images.cuda()
        outputs = cnn(images)
        _, predicted = torch.max(outputs.cpu().data, 1)
        total += labels.size(0)
        # .item() keeps `correct` a plain Python int; accumulating the raw
        # comparison tensor breaks the '%d' formatting below on modern PyTorch.
        correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))
# Save the Trained Model: persist only the parameter/buffer tensors
# (state_dict), not the whole module object.
learned_state = cnn.state_dict()
torch.save(learned_state, 'cnn.pkl')
Add Comment
Please sign in to add a comment.