import torch
import torchvision
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset
import time


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Conv2d args: in_channels, out_channels, kernel_size, stride, padding
        self.conv1 = nn.Conv2d(3, 6, 5, 2, 2)
        self.pool = nn.MaxPool2d(3, 2)
        self.conv2 = nn.Conv2d(6, 16, 5, 2, 2)
        self.fc1 = nn.Linear(784, 240)
        self.fc2 = nn.Linear(240, 84)
        self.fc3 = nn.Linear(84, 12)
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, self.num_flat_features(x))
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features


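# Quick shape sanity check (a sketch, never called by the rest of the script).
# It assumes 128x128 RGB inputs, which is the input size that makes fc1's
# 784 = 16*7*7 flattened features line up: 128 -> conv1 -> 64 -> pool -> 31
# -> conv2 -> 16 -> pool -> 7.
def sanity_check_net():
    net = Net()
    dummy = torch.randn(2, 3, 128, 128)
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([2, 12])

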
class Memory(Dataset):
    def __init__(self, dataset_array, dataset_labels):
        self.labels = dataset_labels.astype(np.float64)
        self.images = dataset_array.astype(np.float64)

    def __len__(self):
        return self.images.shape[0]

    def __getitem__(self, idx):
        return self.images[idx], self.labels[idx]


def dataloader(batch_size, num_workers=4):
    print("Data loading...")
    training_images = np.load("training_images.npy")
    training_labels = np.load("training_labels.npy")
    trainset = Memory(training_images, training_labels)
    trainset_length = len(trainset)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=False)
    cv_images = np.load("cv_images.npy")
    cv_labels = np.load("cv_labels.npy")
    cvset = Memory(cv_images, cv_labels)
    cvset_length = len(cvset)
    cvloader = torch.utils.data.DataLoader(cvset, batch_size=cvset_length, shuffle=False, num_workers=num_workers, pin_memory=False)
    test_images = np.load("test_images.npy")
    test_labels = np.load("test_labels.npy")
    testset = Memory(test_images, test_labels)
    testset_length = len(testset)
    testloader = torch.utils.data.DataLoader(testset, batch_size=testset_length, shuffle=False, num_workers=num_workers, pin_memory=False)
    trainloader2 = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=num_workers, pin_memory=False)
    testloader2 = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=True, num_workers=num_workers, pin_memory=False)
    print("Data loaded.")
    return trainloader, cvloader, testloader, trainset_length, trainloader2, testloader2


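# NOTE (assumption): the .npy files are not part of this paste. From the code
# above they appear to hold channel-first float arrays of shape (N, 3, 128, 128),
# already normalized (imshow() below unnormalizes with mean 0.5 and std 0.13),
# plus integer class indices 0..11 for the labels. A throwaway example that
# satisfies dataloader()'s expectations would be:
#
#   np.save("training_images.npy", np.random.randn(70, 3, 128, 128))
#   np.save("training_labels.npy", np.random.randint(0, 12, size=70))

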
def training(trainloader, cvloader, testloader, trainset_length, batch_size, net, opt, epochs, lr, weight_decay, step_size, gamma=0.1):
    criterion = nn.CrossEntropyLoss()
    adam = optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
    adagrad = optim.Adagrad(net.parameters(), lr=lr, weight_decay=weight_decay)
    sgd = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=weight_decay)
    if opt == 1:
        optimizer = adam
    elif opt == 2:
        optimizer = adagrad
    else:
        optimizer = sgd
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
    best_acc = 0
    best_epoch = 0      # initialized here so the early-stopping checks below
    best_epoch_lr = 0   # cannot hit a NameError if accuracy never improves
    best_lr = lr

    # print("Training started...")
    since = time.time()
    for epoch in range(epochs):  # loop over the dataset multiple times
        net.train()
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            inputs, labels = data
            inputs = inputs.float().to(device)
            labels = labels.long().to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

        running_loss = running_loss / (trainset_length // batch_size)
        # print('Epoch: %d Loss: %.5f' % (epoch + 1, running_loss))

        cv_acc = accshow(cvloader, net)
        if cv_acc > best_acc:
            best_acc = cv_acc
            torch.save(net, "train_model.pth")
            best_epoch = epoch + 1
            best_epoch_lr = epoch + 1
            best_lr = lr

        # if validation accuracy has not improved for 50 epochs, reduce the
        # learning rate by a factor of 10 (the original reassigned the same
        # optimizer object here, so the new lr never took effect; it is now
        # applied to the optimizer's param groups directly)
        if (epoch + 1) - best_epoch_lr >= 50:
            lr = lr / 10
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
            best_epoch_lr = best_epoch_lr + 30
            # print("Learning rate changed to %f" % lr)

        # early stopping: no improvement for 100 epochs
        if (epoch + 1) - best_epoch >= 100:
            break

        # if epoch % 20 == 19:
        #     print('Train images accuracy: %d%s' % (accshow(trainloader, net), "%"))
        #     print('Validation images accuracy: %d%s' % (cv_acc, "%"))
        #     print('Test images accuracy: %d%s' % (accshow(testloader, net), "%"))

        scheduler.step()
    time_elapsed = time.time() - since
    # print('Training completed in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    best_model = torch.load("train_model.pth")
    return best_model, best_epoch, best_lr


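# Note: torch.save(net, ...) above pickles the whole nn.Module, so the checkpoint
# only loads cleanly alongside this exact class definition. Saving just the
# weights is the more portable pattern (a sketch, not used elsewhere in this script):
#
#   torch.save(net.state_dict(), "train_model_weights.pth")
#   restored = Net()
#   restored.load_state_dict(torch.load("train_model_weights.pth"))

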
def checking(trainloader, cvloader, testloader, trainset_length, batch_size, epochs, initial_lr, step_size, loops):
    # grid search over the three optimizers and a list of weight-decay values,
    # training `loops` models per combination and keeping the best checkpoints
    cv_score, test_score = 0, 0
    opt_list = [1, 2, 3]
    decay_list = [0.05, 0.03, 0.01, 0.005, 0.003, 0.001, 0.0005, 0.0003, 0.0001]
    print("Checking started...")
    since = time.time()
    for opt in opt_list:
        for weight in decay_list:
            for i in range(loops):
                net = Net()
                net = net.to(device)
                trained_net, best_epoch, lr = training(trainloader, cvloader, testloader, trainset_length, batch_size, net, opt, epochs, initial_lr, weight, step_size)
                trained_net = trained_net.to(device)
                train_acc = accshow(trainloader, trained_net)
                cv_acc = accshow(cvloader, trained_net)
                test_acc = accshow(testloader, trained_net)
                print("Train: %d%s CV: %d%s Test: %d%s Opt: %d Rate: %.4f Decay: %.4f Loop: %d Epoch: %d" % (train_acc, "%", cv_acc, "%", test_acc, "%", opt, lr, weight, i + 1, best_epoch))
                if cv_acc > cv_score:
                    cv_score = cv_acc
                    cv_train = train_acc
                    cv_test = test_acc
                    cv_opt = opt
                    cv_rate = lr
                    cv_decay = weight
                    cv_epoch = best_epoch
                    torch.save(trained_net, "check_model_val.pth")
                if test_acc > test_score:
                    test_score = test_acc
                    test_train = train_acc
                    test_cv = cv_acc
                    test_opt = opt
                    test_rate = lr
                    test_decay = weight
                    test_epoch = best_epoch
                    torch.save(trained_net, "check_model_test.pth")
    time_elapsed = time.time() - since
    print('Checking completed in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print("Best cross-validation set accuracy:")
    print("Train: %d%s CV: %d%s Test: %d%s Opt: %d Rate: %.4f Decay: %.4f Epoch: %d" % (cv_train, "%", cv_score, "%", cv_test, "%", cv_opt, cv_rate, cv_decay, cv_epoch))
    print("Best test set accuracy:")
    print("Train: %d%s CV: %d%s Test: %d%s Opt: %d Rate: %.4f Decay: %.4f Epoch: %d" % (test_train, "%", test_cv, "%", test_score, "%", test_opt, test_rate, test_decay, test_epoch))
    cv_net = torch.load("check_model_val.pth")
    test_net = torch.load("check_model_test.pth")
    return cv_net, test_net


def accshow(loader, net):
    net.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for data in loader:
            inputs, labels = data
            inputs = inputs.float().to(device)
            labels = labels.long().to(device)
            outputs = net(inputs)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    return 100 * correct / total


def classaccshow(loader, net, classes, classes_length):
    net.eval()
    class_correct = list(0. for i in range(classes_length))
    class_total = list(0. for i in range(classes_length))
    with torch.no_grad():
        for data in loader:
            inputs, labels = data
            inputs = inputs.float().to(device)
            labels = labels.long().to(device)
            outputs = net(inputs)
            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels).squeeze()
            # iterate over the whole batch (the original looped over a fixed
            # range(100) and swallowed the IndexError for smaller batches,
            # which also silently ignored samples beyond the first 100)
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1
    for i in range(classes_length):
        print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))


def imshow(img):
    img = img * 0.13 + 0.5  # unnormalize (assumes inputs were normalized with mean 0.5 and std 0.13)
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()


def imgshow(loader, net, classes):
    net.eval()
    dataiter = iter(loader)
    inputs, labels = next(dataiter)  # dataiter.next() was removed in newer PyTorch
    imshow(torchvision.utils.make_grid(inputs))
    print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
    inputs = inputs.float().to(device)  # cast to float32 to match the model weights, as in accshow()
    outputs = net(inputs)
    _, predicted = torch.max(outputs, 1)
    print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))


if __name__ == '__main__':

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    classes = ('alder', 'beech', 'birch', 'chestnut', 'gingko biloba', 'hornbeam', 'horse chestnut', 'linden', 'oak', 'oriental plane', 'pine', 'spruce')
    classes_length = len(classes)
    batch_size = 35
    trainloader, cvloader, testloader, trainset_length, trainloader2, testloader2 = dataloader(batch_size)

    net = Net()
    net = net.to(device)
    epochs = 999
    step_size = epochs
    opt = 2
    learning_rate = 0.01
    weight_decay = 0.0005

    # model, best_epoch, best_lr = training(trainloader, cvloader, testloader, trainset_length, batch_size, net, opt, epochs, learning_rate, weight_decay, step_size)
    # print("Best net score was in %d epoch with %f learning rate." % (best_epoch, best_lr))
    model, model_test = checking(trainloader, cvloader, testloader, trainset_length, batch_size, epochs, learning_rate, step_size, loops=3)
    # model = torch.load("best_val.pth", map_location=device)
    model = model.to(device)
    print("Final model:")
    print('Train images accuracy: %d%s' % (accshow(trainloader, model), "%"))
    print('Validation images accuracy: %d%s' % (accshow(cvloader, model), "%"))
    print('Test images accuracy: %d%s' % (accshow(testloader, model), "%"))

    # classaccshow(cvloader, model, classes, classes_length)
    # classaccshow(testloader, model, classes, classes_length)
    # imgshow(testloader2, model, classes)