import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchvision.transforms import ToTensor, Resize

# Optional: let cuDNN benchmark convolution algorithms; helps when input sizes are fixed.
# torch.backends.cudnn.benchmark = True

device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")
# FashionMNIST images are 28x28 grayscale; AlexNet expects 227x227 inputs, so resize first.
training_data = datasets.FashionMNIST(
    root="./fashionmnist",
    train=True,
    download=True,
    transform=transforms.Compose([
        Resize((227, 227)),
        ToTensor(),
    ]),
)

test_data = datasets.FashionMNIST(
    root="./fashionmnist",
    train=False,
    download=True,
    transform=transforms.Compose([
        Resize((227, 227)),
        ToTensor(),
    ]),
)
batch_size = 64
# Shuffle the training set each epoch so batches are not drawn in a fixed order.
train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
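
# Optional sanity check (not in the original paste): confirm the resized batch shape
# before building the model. Each batch should be (batch_size, 1, 227, 227).
X_sample, y_sample = next(iter(train_dataloader))
print(f"Batch shape: {X_sample.shape}, labels shape: {y_sample.shape}")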
class NeuralNetwork(nn.Module):
    """AlexNet-style CNN for single-channel 227x227 inputs, 10 classes."""

    def __init__(self):
        super().__init__()
        self.conv2d_relu_stack = nn.Sequential(
            # 1x227x227 -> 96x55x55
            nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),
            # -> 96x27x27
            nn.MaxPool2d(kernel_size=3, stride=2),
            # -> 256x27x27
            nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),
            # -> 256x13x13
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),
            nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),
            nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),
            # -> 256x6x6
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Flatten(),
            # 256 * 6 * 6 = 9216 flattened features
            nn.Linear(9216, 4096), nn.ReLU(), nn.Dropout(p=0.5),
            nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),
            nn.Linear(4096, 10),
        )

    def forward(self, x):
        logits = self.conv2d_relu_stack(x)
        return logits
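
# Optional shape check (not in the original paste): a dummy forward pass on a CPU
# instance verifies the conv stack really produces 9216 features for the first
# Linear layer, i.e. the network runs end to end on a 227x227 input.
with torch.no_grad():
    dummy = torch.zeros(1, 1, 227, 227)
    assert NeuralNetwork()(dummy).shape == (1, 10)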
model = NeuralNetwork().to(device)

learning_rate = 1e-3
epochs = 100

# Initialize the loss function and optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
def train_loop(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    model.train()  # enable dropout during training
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")
def test_loop(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0
    model.eval()  # disable dropout during evaluation
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")

torch.save(model.state_dict(), 'alexnet.model')
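
# Minimal reload sketch (assumes the 'alexnet.model' file saved above): rebuild the
# architecture, load the saved weights, and switch to eval mode for inference.
model = NeuralNetwork().to(device)
model.load_state_dict(torch.load('alexnet.model', map_location=device))
model.eval()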