Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
- from __future__ import print_function, division
- import torch
- from torch.autograd import Variable
- import torch
- import torch.nn as nn
- import torch.optim as optim
- from torch.optim import lr_scheduler
- import numpy as np
- import torchvision
- from torchvision import datasets, models, transforms
- import matplotlib.pyplot as plt
- import time
- import os
- import copy
- import torch.utils.data as data_utils
- from torch.utils import data
- data_transforms = {
- 'train': transforms.Compose([
- transforms.RandomResizedCrop(224),
- transforms.RandomHorizontalFlip(),
- transforms.RandomRotation(20),
- transforms.ToTensor(),
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
- ])
- }
- data_dir = "images"
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- def imshow(inp, title=None):
- """Imshow for Tensor."""
- inp = inp.numpy().transpose((1, 2, 0))
- mean = np.array([0.485, 0.456, 0.406])
- std = np.array([0.229, 0.224, 0.225])
- inp = std * inp + mean
- inp = np.clip(inp, 0, 1)
- plt.imshow(inp)
- if title is not None:
- plt.title(title)
- plt.pause(0.001) # pause a bit so that plots are updated
- def train_model(model, criterion, optimizer, scheduler, dataloaders, num_epochs=25):
- since = time.time()
- best_model_wts = copy.deepcopy(model.state_dict())
- best_acc = 0.0
- for epoch in range(num_epochs):
- print('Epoch {}/{}'.format(epoch, num_epochs - 1))
- print('-' * 10)
- # Each epoch has a training and validation phase
- for phase in ['train']:
- if phase == 'train':
- scheduler.step()
- model.train() # Set model to training mode
- else:
- model.eval() # Set model to evaluate mode
- running_loss = 0.0
- running_corrects = 0
- # Iterate over data.
- for inputs, labels in dataloaders[phase]:
- inputs = inputs.to(device)
- labels = labels.to(device)
- # zero the parameter gradients
- optimizer.zero_grad()
- # forward
- # track history if only in train
- with torch.set_grad_enabled(phase == 'train'):
- outputs = model(inputs)
- _, preds = torch.max(outputs, 1)
- loss = criterion(outputs, labels)
- # backward + optimize only if in training phase
- if phase == 'train':
- loss.backward()
- optimizer.step()
- # statistics
- running_loss += loss.item() * inputs.size(0)
- running_corrects += torch.sum(preds == labels.data)
- epoch_loss = running_loss / dataset_sizes[phase]
- epoch_acc = running_corrects.double() / dataset_sizes[phase]
- print('{} Loss: {:.4f} Acc: {:.4f}'.format(
- phase, epoch_loss, epoch_acc))
- # deep copy the model
- # if phase == 'val' and epoch_acc > best_acc:
- # best_acc = epoch_acc
- # best_model_wts = copy.deepcopy(model.state_dict())
- print()
- time_elapsed = time.time() - since
- print('Training complete in {:.0f}m {:.0f}s'.format(
- time_elapsed // 60, time_elapsed % 60))
- # print('Best val Acc: {:4f}'.format(best_acc))
- # model.load_state_dict(best_model_wts)
- return model
- def visualize_model(model, num_images=6):
- was_training = model.training
- model.eval()
- images_so_far = 0
- fig = plt.figure()
- with torch.no_grad():
- #for i, (inputs, labels) in enumerate(dataloaders['test]):
- for i, (inputs, labels) in enumerate(dataloaders['train']):
- inputs = inputs.to(device)
- labels = labels.to(device)
- outputs = model(inputs)
- _, preds = torch.max(outputs, 1)
- for j in range(inputs.size()[0]):
- images_so_far += 1
- ax = plt.subplot(num_images//2, 2, images_so_far)
- ax.axis('off')
- ax.set_title('predicted: {}'.format(class_names[preds[j]]))
- imshow(inputs.cpu().data[j])
- if images_so_far == num_images:
- model.train(mode=was_training)
- return
- model.train(mode=was_training)
- ######################################################################
- # Finetuning the convnet
- # ----------------------
- #
- # Load a pretrained model and reset final fully connected layer.
- #
- #model_ft = models.resnet18(pretrained=True)
- model_ft = models.resnet50(pretrained=True)
- num_ftrs = model_ft.fc.in_features
- model_ft.fc = nn.Linear(num_ftrs, 9)
- model_ft = model_ft.to(device)
- criterion = nn.CrossEntropyLoss()
- # Observe that all parameters are being optimized
- optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
- # Decay LR by a factor of 0.1 every 7 epochs
- exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
- #model_ft = model_ft.cuda()
- nb_samples = 931
- nb_classes = 9
- data_transforms = {
- 'train': transforms.Compose([
- transforms.RandomResizedCrop(224),
- transforms.RandomHorizontalFlip(),
- transforms.RandomRotation(20),
- transforms.ToTensor(),
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
- ])
- }
- '''val_loader = data.DataLoader(
- image_datasets['train'],
- num_workers=2,
- batch_size=1
- )
- val_loader = iter(val_loader)'''
- image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
- data_transforms[x])
- for x in ['train']}
- dataset_sizes = {x: len(image_datasets[x]) for x in ['train']}
- class_names = image_datasets['train'].classes
- # LOOCV
- loocv_preds = []
- loocv_targets = []
- for idx in range(nb_samples):
- print('Using sample {} as test data'.format(idx))
- # Get all indices and remove test sample
- train_indices = list(range(len(image_datasets['train'])))
- del train_indices[idx]
- # Create new sampler
- sampler = data.SubsetRandomSampler(train_indices)
- dataloader = data.DataLoader(
- image_datasets['train'],
- num_workers=2,
- batch_size=1,
- sampler=sampler
- )
- # Train model
- for batch_idx, (samples, target) in enumerate(dataloader):
- print('Batch {}'.format(batch_idx))
- model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, dataloader, num_epochs=25) # do I add this line here?
- # Test on LOO sample
- model_ft.eval()
- test_data, test_target = image_datasets['train'][idx]
- #test_data, test_target = dataloader.next()
- test_data = test_data.cuda()
- test_target = test_target.cuda()
- test_data.unsqueeze_(1)
- test_target.unsqueeze_(0)
- output = model_ft(test_data)
- pred = torch.argmax(output, 1)
- loocv_preds.append(pred)
- loocv_targets.append(test_target.item())
- ---------------------------------------------
- $ python loocv_tl.py
- Using sample 0 as test data
- Batch 0
- Epoch 0/24
- ----------
- Exception ignored in: <bound method _DataLoaderIter.__del__ of <torch.utils.data.dataloader._DataLoaderIter object at 0x7fd82a3f0320>>
- Traceback (most recent call last):
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 399, in __del__
- self._shutdown_workers()
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 378, in _shutdown_workers
- self.worker_result_queue.get()
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/queues.py", line 337, in get
- return _ForkingPickler.loads(res)
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py", line 151, in rebuild_storage_fd
- fd = df.detach()
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/resource_sharer.py", line 57, in detach
- with _resource_sharer.get_connection(self._id) as conn:
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/resource_sharer.py", line 87, in get_connection
- c = Client(address, authkey=process.current_process().authkey)
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/connection.py", line 494, in Client
- deliver_challenge(c, authkey)
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/connection.py", line 722, in deliver_challenge
- response = connection.recv_bytes(256) # reject large message
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/connection.py", line 216, in recv_bytes
- buf = self._recv_bytes(maxlength)
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/connection.py", line 407, in _recv_bytes
- buf = self._recv(4)
- File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/connection.py", line 379, in _recv
- chunk = read(handle, remaining)
- ConnectionResetError: [Errno 104] Connection reset by peer
- Traceback (most recent call last):
- File "loocv_tl.py", line 231, in <module>
- model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, dataloader, num_epochs=25) # do I add this line here?
- File "loocv_tl.py", line 78, in train_model
- for inputs, labels in dataloaders[phase]:
- TypeError: 'DataLoader' object is not subscriptable
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement