Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- %matplotlib inline
- from graphviz import Digraph
- import torch
- from torch.autograd import Variable
- # make_dot was moved to https://github.com/szagoruyko/pytorchviz
- from torchviz import make_dot
- # -*- coding: utf-8 -*-
- """
- Transfer Learning Tutorial
- ==========================
- **Author**: `Sasank Chilamkurthy <https://chsasank.github.io>`_
- In this tutorial, you will learn how to train your network using
- transfer learning. You can read more about transfer learning in the `cs231n
- notes <http://cs231n.github.io/transfer-learning/>`__
- Quoting these notes,
- In practice, very few people train an entire Convolutional Network
- from scratch (with random initialization), because it is relatively
- rare to have a dataset of sufficient size. Instead, it is common to
- pretrain a ConvNet on a very large dataset (e.g. ImageNet, which
- contains 1.2 million images with 1000 categories), and then use the
- ConvNet either as an initialization or a fixed feature extractor for
- the task of interest.
- These two major transfer learning scenarios look as follows:
- - **Finetuning the convnet**: Instead of random initialization, we
- initialize the network with a pretrained network, like the one that is
- trained on imagenet 1000 dataset. Rest of the training looks as
- usual.
- - **ConvNet as fixed feature extractor**: Here, we will freeze the weights
- for all of the network except that of the final fully connected
- layer. This last fully connected layer is replaced with a new one
- with random weights and only this layer is trained.
- """
- # License: BSD
- # Author: Sasank Chilamkurthy
- from __future__ import print_function, division
- import torch
- import torch.nn as nn
- import torch.optim as optim
- from torch.optim import lr_scheduler
- import numpy as np
- import torchvision
- from torchvision import datasets, models, transforms
- import matplotlib.pyplot as plt
- import time
- import os
- import copy
- plt.ion() # interactive mode
- ######################################################################
- # Load Data
- # ---------
- #
- # We will use torchvision and torch.utils.data packages for loading the
- # data.
- #
- # The problem we're going to solve today is to train a model to classify
- # **ants** and **bees**. We have about 120 training images each for ants and bees.
- # There are 75 validation images for each class. Usually, this is a very
- # small dataset to generalize upon, if trained from scratch. Since we
- # are using transfer learning, we should be able to generalize reasonably
- # well.
- #
- # This dataset is a very small subset of imagenet.
- #
- # .. Note ::
- # Download the data from
- # `here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`_
- # and extract it to the current directory.
- # Data augmentation and normalization for training
- # Just normalization for validation
# Training data gets augmentation (random resized crop + horizontal flip);
# test data only a deterministic resize/center-crop. Both pipelines are
# normalized with the standard ImageNet channel statistics.
_normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        _normalize,
    ]),
    'test': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        _normalize,
    ]),
}

data_dir = "images"

# Expects the ImageFolder layout: <data_dir>/<split>/<class_name>/<image files>.
splits = ['train', 'test']
image_datasets = {
    split: datasets.ImageFolder(os.path.join(data_dir, split),
                                data_transforms[split])
    for split in splits
}
dataloaders = {
    split: torch.utils.data.DataLoader(image_datasets[split], batch_size=4,
                                       shuffle=True, num_workers=4)
    for split in splits
}
dataset_sizes = {split: len(image_datasets[split]) for split in splits}
class_names = image_datasets['train'].classes

# Prefer the first GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- ######################################################################
- # Visualize a few images
- # ^^^^^^^^^^^^^^^^^^^^^^
- # Let's visualize a few training images so as to understand the data
- # augmentations.
def imshow(inp, title=None):
    """Display a (C, H, W) image tensor, undoing ImageNet normalization."""
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    img = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
    img = np.clip(std * img + mean, 0, 1)   # de-normalize, clamp to [0, 1]
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # give the GUI event loop a moment to draw
# Sanity check: show one augmented training batch with its class labels.
sample_inputs, sample_labels = next(iter(dataloaders['train']))
grid = torchvision.utils.make_grid(sample_inputs)
imshow(grid, title=[class_names[lbl] for lbl in sample_labels])
- ######################################################################
- # Training the model
- # ------------------
- #
- # Now, let's write a general function to train a model. Here, we will
- # illustrate:
- #
- # - Scheduling the learning rate
- # - Saving the best model
- #
- # In the following, parameter ``scheduler`` is an LR scheduler object from
- # ``torch.optim.lr_scheduler``.
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train ``model`` and return it loaded with the best test-split weights.

    Fixes over the pasted original:
    - ``model.eval()`` was commented out, so test-phase loss/accuracy were
      computed with dropout/batchnorm still in training mode.
    - Best-model checkpointing was dead code (the update checked
      ``phase == 'val'``, a phase that never occurs here, and the final
      ``load_state_dict`` was commented out), so the *last* epoch's weights
      were returned regardless of quality.
    - ``scheduler.step()`` ran before the epoch's optimizer steps; PyTorch
      >= 1.1 requires stepping the scheduler after the optimizer.

    Args:
        model: network already moved to ``device``.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss()``.
        optimizer: optimizer over ``model.parameters()``.
        scheduler: LR scheduler from ``torch.optim.lr_scheduler``; stepped
            once per epoch after the training phase.
        num_epochs: number of full passes over the data.

    Returns:
        The model with the weights that achieved the best test accuracy.
    """
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training phase and an evaluation phase.
        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()  # enable dropout / batchnorm running updates
            else:
                model.eval()   # frozen statistics for honest test metrics

            running_loss = 0.0
            running_corrects = 0

            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                optimizer.zero_grad()

                # Track gradient history only during training.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Weight batch loss by batch size (the last batch may be short).
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            if phase == 'train':
                # Step after the optimizer so the first LR value isn't skipped
                # (required ordering since PyTorch 1.1).
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # Checkpoint the best weights seen on the held-out split.
            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best test Acc: {:4f}'.format(best_acc))

    # Return the best (not merely the last) weights.
    model.load_state_dict(best_model_wts)
    return model
- ######################################################################
- # Visualizing the model predictions
- # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- #
- # Generic function to display predictions for a few images
- #
def visualize_model(model, num_images=6):
    """Plot predictions for the first ``num_images`` test images.

    Restores the model's original train/eval mode before returning.
    """
    was_training = model.training
    model.eval()
    shown = 0
    plt.figure()

    with torch.no_grad():
        for batch_inputs, batch_labels in dataloaders['test']:
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)

            outputs = model(batch_inputs)
            _, preds = torch.max(outputs, 1)

            for idx in range(batch_inputs.size(0)):
                shown += 1
                ax = plt.subplot(num_images // 2, 2, shown)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[idx]]))
                imshow(batch_inputs.cpu().data[idx])

                if shown == num_images:
                    model.train(mode=was_training)
                    return
    model.train(mode=was_training)
- ######################################################################
- # Finetuning the convnet
- # ----------------------
- #
- # Load a pretrained model and reset final fully connected layer.
- #
# Finetuning: start from an ImageNet-pretrained ResNet-50 and replace the
# classifier head with a fresh 9-way linear layer.
model_ft = models.resnet50(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 9)
model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()

# Every parameter (backbone and new head) is optimized.
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)

# Multiply the learning rate by 0.1 every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
- ######################################################################
- # Train and evaluate
- # ^^^^^^^^^^^^^^^^^^
- #
- # It should take around 15-25 min on CPU. On GPU though, it takes less than a
- # minute.
- #
# Train (15-25 min on CPU, under a minute on GPU), then show predictions.
model_ft = train_model(
    model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)

visualize_model(model_ft)
- ---------------------------------------------------------------------
- Epoch 0/24
- ----------
- train Loss: 2.0849 Acc: 0.3047
- test Loss: 1.9907 Acc: 0.3643
- Epoch 1/24
- ----------
- train Loss: 1.9912 Acc: 0.3262
- test Loss: 1.9723 Acc: 0.3143
- Epoch 2/24
- ----------
- train Loss: 1.8772 Acc: 0.3451
- test Loss: 1.9634 Acc: 0.3429
- Epoch 3/24
- ----------
- train Loss: 1.8997 Acc: 0.3477
- test Loss: 2.2405 Acc: 0.2857
- Epoch 4/24
- ----------
- train Loss: 1.8376 Acc: 0.3869
- test Loss: 2.0975 Acc: 0.2857
- Epoch 5/24
- ----------
- train Loss: 1.7459 Acc: 0.4121
- test Loss: 2.0324 Acc: 0.3143
- Epoch 6/24
- ----------
- train Loss: 1.7635 Acc: 0.4046
- test Loss: 2.0811 Acc: 0.4071
- Epoch 7/24
- ----------
- train Loss: 1.4989 Acc: 0.4829
- test Loss: 1.9167 Acc: 0.4214
- Epoch 8/24
- ----------
- train Loss: 1.3821 Acc: 0.5145
- test Loss: 1.9868 Acc: 0.3929
- Epoch 9/24
- ----------
- train Loss: 1.3185 Acc: 0.5575
- test Loss: 1.9225 Acc: 0.4143
- Epoch 10/24
- ----------
- train Loss: 1.3083 Acc: 0.5436
- test Loss: 1.9001 Acc: 0.4357
- Epoch 11/24
- ----------
- train Loss: 1.2618 Acc: 0.5638
- test Loss: 1.9409 Acc: 0.4000
- Epoch 12/24
- ----------
- train Loss: 1.2696 Acc: 0.5765
- test Loss: 1.9952 Acc: 0.3857
- Epoch 13/24
- ----------
- train Loss: 1.2782 Acc: 0.5638
- test Loss: 1.8705 Acc: 0.4143
- Epoch 14/24
- ----------
- train Loss: 1.1628 Acc: 0.6233
- test Loss: 1.9135 Acc: 0.4071
- Epoch 15/24
- ----------
- train Loss: 1.2363 Acc: 0.5904
- test Loss: 1.9826 Acc: 0.4071
- Epoch 16/24
- ----------
- train Loss: 1.2247 Acc: 0.5879
- test Loss: 1.9062 Acc: 0.4357
- Epoch 17/24
- ----------
- train Loss: 1.1758 Acc: 0.6157
- test Loss: 1.9463 Acc: 0.4500
- Epoch 18/24
- ----------
- train Loss: 1.2133 Acc: 0.5942
- test Loss: 1.9168 Acc: 0.4143
- Epoch 19/24
- ----------
- train Loss: 1.1976 Acc: 0.5828
- test Loss: 1.9197 Acc: 0.4000
- Epoch 20/24
- ----------
- train Loss: 1.1934 Acc: 0.6119
- test Loss: 1.8853 Acc: 0.4071
- Epoch 21/24
- ----------
- train Loss: 1.1578 Acc: 0.6068
- test Loss: 1.9011 Acc: 0.3929
- Epoch 22/24
- ----------
- train Loss: 1.1713 Acc: 0.5967
- test Loss: 1.8918 Acc: 0.4143
- Epoch 23/24
- ----------
- train Loss: 1.1726 Acc: 0.6068
- test Loss: 1.9312 Acc: 0.4643
- Epoch 24/24
- ----------
- train Loss: 1.1744 Acc: 0.5891
- test Loss: 1.8914 Acc: 0.4143
- Training complete in 5m 0s
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement