# AlexNet-style convolutional network trained on FashionMNIST,
# with images upscaled from 28x28 to the 227x227 input AlexNet expects.
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchvision.transforms import ToTensor, Resize
# torch.backends.cudnn.benchmark=True

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

training_data = datasets.FashionMNIST(
    root=".\\fasionmnist",
    train=True,
    download=True,
    transform=transforms.Compose([
        Resize((227, 227)),  # upscale 28x28 -> 227x227
        ToTensor()
    ])
)

test_data = datasets.FashionMNIST(
    root=".\\fasionmnist",
    train=False,
    download=True,
    transform=transforms.Compose([
        Resize((227, 227)),
        ToTensor()
    ])
)

train_dataloader = DataLoader(training_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
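
# Optional sanity check (a sketch added for illustration; the names `images`
# and `labels` are not from the original): one batch from the loader should
# be [64, 1, 227, 227] images with a matching vector of 64 integer labels.
images, labels = next(iter(train_dataloader))
print("Batch:", images.shape, labels.shape)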

# AlexNet-style architecture adapted to 1-channel input and 10 classes
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.conv2d_relu_stack = nn.Sequential(
            nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),
            nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),
            nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Flatten(),
            nn.Linear(9216, 4096), nn.ReLU(), nn.Dropout(p=0.5),
            nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),
            nn.Linear(4096, 10)
        )

    def forward(self, x):
        logits = self.conv2d_relu_stack(x)
        return logits

model = NeuralNetwork().to(device)
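
# Optional shape probe (a sketch added for illustration, not in the original):
# with 227x227 inputs the conv/pool stack produces a 256 x 6 x 6 feature map,
# which is why the first Linear layer expects 256 * 6 * 6 = 9216 features.
with torch.no_grad():
    dummy = torch.zeros(1, 1, 227, 227, device=device)
    print("Logits shape:", model(dummy).shape)  # expected: torch.Size([1, 10])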

# Hyperparameters
learning_rate = 1e-3
batch_size = 64
epochs = 100

# Initialize the loss function
loss_fn = nn.CrossEntropyLoss()

optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

def train_loop(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    model.train()  # make sure dropout is active during training
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # if batch % 100 == 0:
        #     loss, current = loss.item(), batch * len(X)
        #     print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")

def test_loop(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0

    model.eval()  # disable dropout for evaluation
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")

torch.save(model.state_dict(), 'alexnet.model')
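
# Minimal reload sketch (an illustrative addition, assuming the 'alexnet.model'
# file saved above): rebuild the architecture, then load the weights back in.
reloaded = NeuralNetwork().to(device)
reloaded.load_state_dict(torch.load('alexnet.model', map_location=device))
reloaded.eval()  # dropout off at inference time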