Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# -*- coding: utf-8 -*-
"""SoftwareAIModel.ipynb

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/1sZUoUsKe8w5H25w5D2pfuSNQ6AcDvbbr

#Importing needed modules
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
"""#Creating Data Sets"""

# Download MNIST (if needed) into the working directory and convert each
# image to a float tensor; the same transform serves both splits.
_to_tensor = transforms.Compose([transforms.ToTensor()])

train = datasets.MNIST('', train=True, download=True, transform=_to_tensor)
test = datasets.MNIST('', train=False, download=True, transform=_to_tensor)

# Mini-batches of 10 samples; only the training stream is shuffled.
trainset = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)
testset = torch.utils.data.DataLoader(test, batch_size=10, shuffle=False)
"""#Creating actual AI learning Algorithm"""

# Training Class
class Net(nn.Module):
    """Fully-connected MNIST classifier: 784 -> 64 -> 64 -> 64 -> 10."""

    def __init__(self):
        super().__init__()
        # Three hidden layers of 64 units, then a 10-way output layer.
        self.fc1 = nn.Linear(28 * 28, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 10)

    def forward(self, x):
        """Map a (batch, 784) input tensor to per-class log-probabilities."""
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = F.relu(hidden(x))
        # Log-probabilities: pairs with F.nll_loss used during training.
        return F.log_softmax(self.fc4(x), dim=1)
net = Net()
print(net)

"""# Optimiser"""

# Creating Optimiser Function
import torch.optim as optim

# NOTE(review): loss_function is never used — the training loop calls
# F.nll_loss directly. Using CrossEntropyLoss here would be wrong anyway,
# since the network already emits log_softmax output and CrossEntropyLoss
# would apply log_softmax a second time.
loss_function = nn.CrossEntropyLoss()
optimiser = optim.Adam(net.parameters(), lr=0.001)

# NOTE(review): accploss looks intended as the early-stop threshold, but
# the training loop hard-codes its own value; kept for compatibility.
accploss = 5 * (10 ** (-5))  # add acceptable loss amount here
"""# Model Training"""

# Choose device for training.
# BUG FIX: the original tested `torch.cuda.is_available` without calling it;
# a bound function object is always truthy, so the GPU branch was taken even
# on CPU-only machines and the subsequent .to("cuda:0") crashed.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    print("Running on GPU")
else:
    device = torch.device("cpu")
    print("running on CPU")

# Move the existing network's parameters to the chosen device in place.
# BUG FIX: the original additionally rebuilt the model (`net = Net().to(device)`),
# which silently replaced the parameters already registered with the Adam
# optimiser — training would then never update the model being used.
net.to(device)
# BUG FIX: losslist was appended to below but never initialised, raising
# NameError at the end of the first epoch.
losslist = []

for epoch in range(50):  # n number of full passes over the data
    for data in trainset:  # `data` is a batch of data
        X, y = data  # X is the batch of features, y is the batch of targets.
        X, y = X.to(device), y.to(device)
        net.zero_grad()  # sets gradients to 0 before loss calc, every step
        output = net(X.view(-1, 784))  # flatten the 28x28 images to 784-vectors
        loss = F.nll_loss(output, y)  # NLL loss on the log_softmax output
        loss.backward()  # backpropagate the loss through the network
        optimiser.step()  # update weights from the accumulated gradients
    print(loss)  # loss of the last batch in this epoch
    losslist.append(loss.item())
    # Early stop once the last-batch loss is small enough.
    # NOTE(review): `accploss` defined earlier was probably meant to be this
    # threshold; the original hard-codes 1e-5, kept as-is.
    if loss.item() < (1 * (10 ** -5)):
        break
"""# Model Testing"""

# Testing model: count exact argmax matches over the held-out test set.
# (Removed the stray pre-loop `X, y = X.to(device), y.to(device)` that
# referenced leftover training-loop variables before the loop even ran.)
correct = 0
total = 0
with torch.no_grad():  # no gradients needed for evaluation
    for data in testset:
        X, y = data
        # BUG FIX: labels must live on the same device as the model output;
        # the original left y on the CPU, so `torch.argmax(i) == y[idx]`
        # mixed CUDA and CPU tensors and raised on GPU runs.
        X, y = X.to(device), y.to(device)
        output = net(X.view(-1, 784))
        #print(output) # commented out, use for debugging
        for idx, i in enumerate(output):
            #print(torch.argmax(i), y[idx]) # commented out, use for debugging
            if torch.argmax(i) == y[idx]:
                correct += 1
            total += 1

print("Accuracy: ", round(correct / total, 3))
import matplotlib.pyplot as plt

# Preview one test image (index 9 of the final evaluation batch) as 28x28.
plt.imshow(X[9].view(28, 28))
plt.show()

# Run a single sample through the network by hand.
a_featureset = X[0]
reshaped_for_network = a_featureset.view(-1, 784)  # 784 because the images are 28*28
output = net(reshaped_for_network.to(device))  # list of network predictions
first_pred = output[0]
print(first_pred)

# The predicted digit is the index of the largest log-probability ('argmax').
biggest_index = torch.argmax(first_pred)
print(biggest_index)

# Code below is explained above
# print(torch.argmax(net(X[9].view(-1,784))[0]))

"""The model gives acceptable results, therefore it is a reasonable idea to export it so we can have a model that can be implemented into the webcam number detection algorithm."""
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement