import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable

# Hyper Parameters
num_epochs = 5
batch_size = 100
learning_rate = 0.001
use_cuda = torch.cuda.is_available()

# MNIST Dataset
train_dataset = dsets.MNIST(root=r'E:\DataSets\mnist',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)

test_dataset = dsets.MNIST(root=r'E:\DataSets\mnist',
                           train=False,
                           transform=transforms.ToTensor())

# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)


# CNN Model (2 conv layers)
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # input size None * 1 * 28 * 28
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        # size None * 16 * 14 * 14
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        # size None * 32 * 7 * 7
        self.fc = nn.Linear(7 * 7 * 32, 10)
        # size None * 10

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out


cnn = CNN()

# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)

if use_cuda:
    cnn = cnn.cuda()
    criterion = criterion.cuda()

# Train the Model
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images)
        labels = Variable(labels)

        if use_cuda:
            images = images.cuda()
            labels = labels.cuda()

        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = cnn(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
                  % (epoch + 1, num_epochs, i + 1, len(train_dataset) // batch_size, loss.data[0]))

# Test the Model
cnn.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
correct = 0
total = 0
for images, labels in test_loader:
    images = Variable(images)
    if use_cuda:
        images = images.cuda()
    outputs = cnn(images)
    _, predicted = torch.max(outputs.cpu().data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()

print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))

# Save the Trained Model
torch.save(cnn.state_dict(), 'cnn.pkl')
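
# Reusing the saved weights later: torch.save above stores only the state dict,
# so a later session has to rebuild the network and load it back. A minimal
# sketch, assuming the CNN class, test_dataset and 'cnn.pkl' from above are
# available (weights saved from a GPU may need
# torch.load('cnn.pkl', map_location='cpu') on a CPU-only machine).
model = CNN()
model.load_state_dict(torch.load('cnn.pkl'))
model.eval()  # BatchNorm uses its running statistics at inference time

image, label = test_dataset[0]                 # one 1 x 28 x 28 MNIST image
output = model(Variable(image.unsqueeze(0)))   # add a batch dimension -> 1 x 10 scores
_, predicted = torch.max(output.data, 1)
print('Predicted: %d, actual: %d' % (int(predicted[0]), int(label)))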