Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
from __future__ import print_function

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as utils
# from torchvision import datasets, transforms  # unused, kept for reference

# The parts that you should complete are designated as TODO
class ConvNet(nn.Module):
    """Small CNN for 28x28 single-channel images (MNIST-style), 10 classes.

    Architecture: Conv(1->32, 3x3) -> ReLU -> Conv(32->64, 3x3) -> ReLU
    -> MaxPool(2x2, stride 2) -> Dropout2d(0.25) -> Flatten
    -> Linear(9216->128) -> ReLU -> Dropout(0.5) -> Linear(128->10).
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        self.Cn1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3))
        self.Cn2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3))
        self.MP = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
        self.dp = nn.Dropout2d(p=0.25)
        # Spatial size: 28 -> 26 -> 24 after the two 3x3 convs, 12 after
        # pooling, so the flattened feature vector is 64 * 12 * 12 = 9216.
        self.lin1 = nn.Linear(9216, 128)
        self.dp2 = nn.Dropout(p=0.5)
        self.fin = nn.Linear(128, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10) for input x of
        shape (batch, 1, 28, 28).

        BUGFIX: the original applied torch.softmax here, but training uses
        F.cross_entropy, which applies log_softmax internally — feeding it
        softmax outputs silently weakens the gradients. Returning logits
        fixes training; argmax-based accuracy is unchanged because softmax
        is monotonic.
        """
        out = torch.relu(self.Cn1(x))
        out = torch.relu(self.Cn2(out))
        out = self.MP(out)
        out = self.dp(out)
        out = torch.flatten(out, start_dim=1)
        out = torch.relu(self.lin1(out))
        out = self.dp2(out)
        return self.fin(out)
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch and return accuracy (%) on the training set.

    model: network whose forward returns class scores for F.cross_entropy.
    device: torch.device batches are moved to before the forward pass.
    train_loader: DataLoader yielding (data, target) batches.
    optimizer: optimizer stepping model.parameters().
    epoch: current epoch index, used only for logging.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:  # Print loss every 100 batches
            print('Train Epoch: {}\tLoss: {:.6f}'.format(
                epoch, loss.item()))
    # Re-scan the full training set for accuracy (test() switches the model
    # to eval mode; the next epoch's model.train() switches it back).
    accuracy = test(model, device, train_loader)
    return accuracy
- def test(model, device, test_loader):
- model.eval()
- correct = 0
- with torch.no_grad():
- for data, target in test_loader:
- data, target = data.to(device), target.to(device)
- output = model(data)
- pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
- correct += pred.eq(target.view_as(pred)).sum().item()
- accuracy = 100. * correct / len(test_loader.dataset)
- return accuracy
def main(data_dir='D:/589_MachineLearning/hw4/Data'):
    """Train the CNN on .npy MNIST data under *data_dir*, print per-epoch
    train/test accuracy, save the weights, and plot the accuracy curves.

    data_dir: directory containing X_train.npy, y_train.npy, X_test.npy and
        y_test.npy (default keeps the original hard-coded location, so
        existing main() callers are unaffected).
    """
    torch.manual_seed(1)
    np.random.seed(1)

    # Training settings
    use_cuda = False  # Switch to False if you only want to use your CPU
    learning_rate = 0.01
    NumEpochs = 10
    batch_size = 32

    device = torch.device("cuda" if use_cuda else "cpu")

    train_X = np.load(data_dir + '/X_train.npy')
    train_Y = np.load(data_dir + '/y_train.npy')
    test_X = np.load(data_dir + '/X_test.npy')
    test_Y = np.load(data_dir + '/y_test.npy')

    # The data is flattened, so reshape it back to image dimensions (NCHW).
    train_X = train_X.reshape([-1, 1, 28, 28])
    test_X = test_X.reshape([-1, 1, 28, 28])

    # Transform to torch tensors. float32 is forced explicitly: Conv2d
    # weights are float32, and a float64 .npy array would otherwise raise a
    # dtype mismatch at the first forward pass.
    tensor_x = torch.tensor(train_X, dtype=torch.float32, device=device)
    tensor_y = torch.tensor(train_Y, dtype=torch.long, device=device)
    test_tensor_x = torch.tensor(test_X, dtype=torch.float32, device=device)
    # BUGFIX: the original left this tensor off *device*, inconsistently
    # with the other three (harmless on CPU, a crash with use_cuda=True).
    test_tensor_y = torch.tensor(test_Y, dtype=torch.long, device=device)

    train_dataset = utils.TensorDataset(tensor_x, tensor_y)
    train_loader = utils.DataLoader(train_dataset, batch_size=batch_size)
    test_dataset = utils.TensorDataset(test_tensor_x, test_tensor_y)
    test_loader = utils.DataLoader(test_dataset)

    model = ConvNet().to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.0001)

    testyArr = []
    trainyArr = []
    for epoch in range(NumEpochs):
        train_acc = train(model, device, train_loader, optimizer, epoch)
        trainyArr.append(train_acc)
        print('\nTrain set Accuracy: {:.0f}%\n'.format(train_acc))
        test_acc = test(model, device, test_loader)
        testyArr.append(test_acc)
        print('\nTest set Accuracy: {:.0f}%\n'.format(test_acc))

    torch.save(model.state_dict(), "mnist_cnn.pt")

    # Plot accuracy vs. epoch. BUGFIX: the original passed (accuracy, epochs)
    # to plt.plot, putting epochs on the y-axis; "accuracy vs epoch" puts the
    # epoch index on x and accuracy on y.
    plt.plot(range(NumEpochs), trainyArr, label="Train")
    plt.plot(range(NumEpochs), testyArr, label="Test")
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy (%)")
    plt.legend()
    plt.show()


if __name__ == '__main__':
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement