lamiastella

TypeError: 'DataLoader' object is not subscriptable

Nov 20th, 2018
  1. from __future__ import print_function, division
  2.  
  3. import torch
  4. from torch.autograd import Variable
  5.  
  6.  
  7.  
  8. import torch
  9. import torch.nn as nn
  10. import torch.optim as optim
  11. from torch.optim import lr_scheduler
  12. import numpy as np
  13. import torchvision
  14. from torchvision import datasets, models, transforms
  15. import matplotlib.pyplot as plt
  16. import time
  17. import os
  18. import copy
  19.  
  20.  
  21.  
  22. import torch.utils.data as data_utils
  23. from torch.utils import data
  24.  
  25.  
  26. data_transforms = {
  27.     'train': transforms.Compose([
  28.         transforms.RandomResizedCrop(224),
  29.         transforms.RandomHorizontalFlip(),
  30.         transforms.RandomRotation(20),
  31.         transforms.ToTensor(),
  32.         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
  33.     ])
  34. }
  35.  
  36.  
  37. data_dir = "images"
  38.  
  39. device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
  40.  
  41.  
  42. def imshow(inp, title=None):
  43.     """Imshow for Tensor."""
  44.     inp = inp.numpy().transpose((1, 2, 0))
  45.     mean = np.array([0.485, 0.456, 0.406])
  46.     std = np.array([0.229, 0.224, 0.225])
  47.     inp = std * inp + mean
  48.     inp = np.clip(inp, 0, 1)
  49.     plt.imshow(inp)
  50.     if title is not None:
  51.         plt.title(title)
  52.     plt.pause(0.001)  # pause a bit so that plots are updated
  53.  
  54.  
  55.  
  56. def train_model(model, criterion, optimizer, scheduler, dataloaders, num_epochs=25):
  57.     since = time.time()
  58.  
  59.     best_model_wts = copy.deepcopy(model.state_dict())
  60.     best_acc = 0.0
  61.  
  62.     for epoch in range(num_epochs):
  63.         print('Epoch {}/{}'.format(epoch, num_epochs - 1))
  64.         print('-' * 10)
  65.  
  66.         # Each epoch has only a training phase here (the validation phase was removed)
  67.         for phase in ['train']:
  68.             if phase == 'train':
  69.                 scheduler.step()
  70.                 model.train()  # Set model to training mode
  71.             else:
  72.                 model.eval()   # Set model to evaluate mode
  73.  
  74.             running_loss = 0.0
  75.             running_corrects = 0
  76.  
  77.             # Iterate over data.
  78.             for inputs, labels in dataloaders[phase]:
  79.                 inputs = inputs.to(device)
  80.                 labels = labels.to(device)
  81.  
  82.                 # zero the parameter gradients
  83.                 optimizer.zero_grad()
  84.  
  85.                 # forward
  86.                 # track history if only in train
  87.                 with torch.set_grad_enabled(phase == 'train'):
  88.                     outputs = model(inputs)
  89.                     _, preds = torch.max(outputs, 1)
  90.                     loss = criterion(outputs, labels)
  91.  
  92.                     # backward + optimize only if in training phase
  93.                     if phase == 'train':
  94.                         loss.backward()
  95.                         optimizer.step()
  96.  
  97.                 # statistics
  98.                 running_loss += loss.item() * inputs.size(0)
  99.                 running_corrects += torch.sum(preds == labels.data)
  100.  
  101.             epoch_loss = running_loss / dataset_sizes[phase]
  102.             epoch_acc = running_corrects.double() / dataset_sizes[phase]
  103.  
  104.             print('{} Loss: {:.4f} Acc: {:.4f}'.format(
  105.                 phase, epoch_loss, epoch_acc))
  106.  
  107.             # deep copy the model
  108.  #           if phase == 'val' and epoch_acc > best_acc:
  109.  #               best_acc = epoch_acc
  110.  #               best_model_wts = copy.deepcopy(model.state_dict())
  111.  
  112.         print()
  113.  
  114.     time_elapsed = time.time() - since
  115.     print('Training complete in {:.0f}m {:.0f}s'.format(
  116.         time_elapsed // 60, time_elapsed % 60))
  117. #    print('Best val Acc: {:4f}'.format(best_acc))
  118.  
  119. #    model.load_state_dict(best_model_wts)
  120.     return model
  121.  
  122.  
  123. def visualize_model(model, num_images=6):
  124.     was_training = model.training
  125.     model.eval()
  126.     images_so_far = 0
  127.     fig = plt.figure()
  128.  
  129.     with torch.no_grad():
  130.         #for i, (inputs, labels) in enumerate(dataloaders['test']):
  131.         for i, (inputs, labels) in enumerate(dataloaders['train']):
  132.  
  133.             inputs = inputs.to(device)
  134.             labels = labels.to(device)
  135.  
  136.             outputs = model(inputs)
  137.             _, preds = torch.max(outputs, 1)
  138.  
  139.             for j in range(inputs.size()[0]):
  140.                 images_so_far += 1
  141.                 ax = plt.subplot(num_images//2, 2, images_so_far)
  142.                 ax.axis('off')
  143.                 ax.set_title('predicted: {}'.format(class_names[preds[j]]))
  144.                 imshow(inputs.cpu().data[j])
  145.  
  146.                 if images_so_far == num_images:
  147.                     model.train(mode=was_training)
  148.                     return
  149.         model.train(mode=was_training)
  150.  
  151.  
  152.  
  153. ######################################################################
  154. # Finetuning the convnet
  155. # ----------------------
  156. #
  157. # Load a pretrained model and reset final fully connected layer.
  158. #
  159.  
  160. #model_ft = models.resnet18(pretrained=True)
  161. model_ft = models.resnet50(pretrained=True)
  162.  
  163. num_ftrs = model_ft.fc.in_features
  164. model_ft.fc = nn.Linear(num_ftrs, 9)
  165.  
  166. model_ft = model_ft.to(device)
  167.  
  168. criterion = nn.CrossEntropyLoss()
  169.  
  170. # Observe that all parameters are being optimized
  171. optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
  172.  
  173. # Decay LR by a factor of 0.1 every 7 epochs
  174. exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
  175.  
  176.  
  177.  
  178. #model_ft = model_ft.cuda()
  179. nb_samples = 931
  180. nb_classes = 9
  181.  
  182.  
  183. data_transforms = {  # (re-declares the same transforms as line 26)
  184.     'train': transforms.Compose([
  185.         transforms.RandomResizedCrop(224),
  186.         transforms.RandomHorizontalFlip(),
  187.         transforms.RandomRotation(20),
  188.         transforms.ToTensor(),
  189.         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
  190.     ])
  191. }
  192.  
  193. '''val_loader = data.DataLoader(
  194.        image_datasets['train'],
  195.        num_workers=2,
  196.        batch_size=1
  197.    )
  198. val_loader = iter(val_loader)'''
  199.  
  200. image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
  201.                                           data_transforms[x])
  202.                   for x in ['train']}
  203.  
  204. dataset_sizes = {x: len(image_datasets[x]) for x in ['train']}
  205. class_names = image_datasets['train'].classes
  206.  
  207. # LOOCV
  208. loocv_preds = []
  209. loocv_targets = []
  210. for idx in range(nb_samples):
  211.    
  212.     print('Using sample {} as test data'.format(idx))
  213.    
  214.     # Get all indices and remove test sample
  215.     train_indices = list(range(len(image_datasets['train'])))
  216.     del train_indices[idx]
  217.    
  218.     # Create new sampler
  219.     sampler = data.SubsetRandomSampler(train_indices)
  220.  
  221.     dataloader = data.DataLoader(
  222.         image_datasets['train'],
  223.         num_workers=2,
  224.         batch_size=1,
  225.         sampler=sampler
  226.     )
  227.    
  228.     # Train model
  229.     for batch_idx, (samples, target) in enumerate(dataloader):
  230.         print('Batch {}'.format(batch_idx))
  231.         model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, dataloader, num_epochs=25) # do I add this line here?
  232.                
  233.     # Test on LOO sample
  234.     model_ft.eval()
  235.     test_data, test_target = image_datasets['train'][idx]
  236.     #test_data, test_target = dataloader.next()
  237.     test_data = test_data.to(device)  # move the held-out sample to the model's device
  238.     test_target = torch.tensor([test_target], device=device)  # ImageFolder returns the label as a plain int
  239.     test_data.unsqueeze_(0)  # add a batch dimension: CxHxW -> 1xCxHxW
  240.     # (no unsqueeze needed: test_target above already has shape [1])
  241.  
  242.     output = model_ft(test_data)
  243.     pred = torch.argmax(output, 1)
  244.     loocv_preds.append(pred)
  245.     loocv_targets.append(test_target.item())
  246.  
  247.  
  248. ---------------------------------------------
  249. $ python loocv_tl.py
  250. Using sample 0 as test data
  251. Batch 0
  252. Epoch 0/24
  253. ----------
  254. Exception ignored in: <bound method _DataLoaderIter.__del__ of <torch.utils.data.dataloader._DataLoaderIter object at 0x7fd82a3f0320>>
  255. Traceback (most recent call last):
  256.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 399, in __del__
  257.     self._shutdown_workers()
  258.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 378, in _shutdown_workers
  259.     self.worker_result_queue.get()
  260.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/queues.py", line 337, in get
  261.     return _ForkingPickler.loads(res)
  262.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py", line 151, in rebuild_storage_fd
  263.     fd = df.detach()
  264.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/resource_sharer.py", line 57, in detach
  265.     with _resource_sharer.get_connection(self._id) as conn:
  266.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/resource_sharer.py", line 87, in get_connection
  267.     c = Client(address, authkey=process.current_process().authkey)
  268.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/connection.py", line 494, in Client
  269.     deliver_challenge(c, authkey)
  270.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/connection.py", line 722, in deliver_challenge
  271.     response = connection.recv_bytes(256)        # reject large message
  272.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/connection.py", line 216, in recv_bytes
  273.     buf = self._recv_bytes(maxlength)
  274.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/connection.py", line 407, in _recv_bytes
  275.     buf = self._recv(4)
  276.   File "/scratch/sjn-p3/anaconda/anaconda3/lib/python3.6/multiprocessing/connection.py", line 379, in _recv
  277.     chunk = read(handle, remaining)
  278. ConnectionResetError: [Errno 104] Connection reset by peer
  279. Traceback (most recent call last):
  280.   File "loocv_tl.py", line 231, in <module>
  281.     model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, dataloader, num_epochs=25) # do I add this line here?
  282.   File "loocv_tl.py", line 78, in train_model
  283.     for inputs, labels in dataloaders[phase]:
  284. TypeError: 'DataLoader' object is not subscriptable
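
---------------------------------------------
Note on the error: train_model() reads its fifth argument as
dataloaders[phase] (line 78), so it expects a dict mapping phase names to
loaders, e.g. {'train': loader}. The LOOCV loop passes the bare DataLoader
itself, and a DataLoader supports iteration but not [] indexing, hence the
TypeError. A minimal fix sketch, reusing the names from the script above;
the enumerate loop around the call at line 231 is dropped, because
train_model() already iterates over the loader internally, so it should be
called once per LOO split, not once per batch:

    # wrap the loader in a dict keyed by phase, matching train_model()'s API
    dataloaders = {'train': dataloader}
    model_ft = train_model(model_ft, criterion, optimizer_ft,
                           exp_lr_scheduler, dataloaders, num_epochs=25)

(One small side effect to be aware of: dataset_sizes['train'] still counts
all 931 images while the sampler draws only 930 per split, so the reported
loss/accuracy denominators are slightly off.)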
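
The "Exception ignored in ... ConnectionResetError" block above is a side
effect, not a second bug: when the main process dies on the TypeError, the
loader workers started with num_workers=2 are torn down mid-handshake.
Running with num_workers=0 while debugging keeps tracebacks clean (a
debugging convenience only; it does not fix the TypeError itself):

    dataloader = data.DataLoader(
        image_datasets['train'],
        num_workers=0,   # single-process loading while debugging
        batch_size=1,
        sampler=sampler
    )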
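
One further caveat with the loop as written: model_ft, optimizer_ft and
exp_lr_scheduler are built once and reused across all 931 splits, so every
fold after the first starts from weights already trained while its held-out
sample was in the training set. The usual LOOCV pattern re-initializes per
fold; a sketch with a hypothetical make_model() helper, using the same
names as the script:

    def make_model():
        m = models.resnet50(pretrained=True)
        m.fc = nn.Linear(m.fc.in_features, nb_classes)  # reset the final layer
        return m.to(device)

    # at the top of the loop over idx:
    model_ft = make_model()
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)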