cnntrain

import torch
import shutil
import os
import numpy as np
import imageio
import matplotlib.pyplot as plt
import cv2
from torch.optim import Adam
from torch.nn import CrossEntropyLoss
from torch.autograd import Variable
from vision.nn.action_recognition import ActionNet
from sklearn.model_selection import StratifiedShuffleSplit

os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
# training hyperparameters
init_lr = 0.0001
num_epoch = 1000
batch_size = 4
resize_size = (32, 32)

dataset_path = 'data/video_dataset/'
# the two action classes: 'ambil' (take) and 'simpan' (store)
class_list = {
    'ambil': 0,
    'simpan': 1
}

train_img = []
class_img = []

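# Expected on-disk layout, inferred from the paths and filename checks in the
# loading loop below (an assumption, not stated in the original script):
#   data/video_dataset/ambil/export/   -> frames whose names start with 'a'
#   data/video_dataset/simpan/export/  -> frames whose names start with 's'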
# LOADING TRAINING DATA
for clss in class_list.keys():
    img_path = dataset_path + clss + '/export/'
    list_img = os.listdir(img_path)
    for img_name in list_img:

        if img_name[0] == 'a':
            class_name = 'ambil'
        elif img_name[0] == 's':
            class_name = 'simpan'
        else:
            # skip files that do not belong to either class
            continue
        load_img_path = img_path + img_name
        # reading the image as a single grayscale channel
        img = imageio.imread(load_img_path, as_gray=True)
        # resizing and normalizing the pixel values to [0, 1]
        img = cv2.resize(img, resize_size)
        img = img / 255.0
        train_img.append(img)
        class_img.append(class_list[class_name])

# converting the lists to numpy arrays
train_x = np.array(train_img)
# defining the target
train_y = np.array(class_img)

dataset_split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
dataset_split.get_n_splits(train_x, train_y)

for train_index, test_index in dataset_split.split(train_x, train_y):
    # print("TRAIN:", train_index, "TEST:", test_index)
    xtrain, xtest = train_x[train_index], train_x[test_index]
    ytrain, ytest = train_y[train_index], train_y[test_index]

# convert to tensors; the model expects float32 input with a channel dimension
xtrain = torch.from_numpy(xtrain).float()
xtest = torch.from_numpy(xtest).float()
xtrain = xtrain.unsqueeze(1)
xtest = xtest.unsqueeze(1)
ytrain = torch.from_numpy(ytrain).type(torch.int64)
ytest = torch.from_numpy(ytest).type(torch.int64)

# defining the model
model = ActionNet(num_class=len(class_list.keys()))
# defining the optimizer
optimizer = Adam(model.parameters(), lr=init_lr)
# defining the loss function
criterion = CrossEntropyLoss()
# checking if GPU is available
if torch.cuda.is_available():
    model = model.cuda()
    criterion = criterion.cuda()

def train(epoch, X, Y):

    model.train()
    tr_loss = 0
    # getting the training set
    x_train, y_train = Variable(X), Variable(Y)
    # getting the validation set
    x_val, y_val = Variable(xtest), Variable(ytest)
    # converting the data into GPU format
    if torch.cuda.is_available():
        x_train = x_train.cuda()
        y_train = y_train.cuda()
        x_val = x_val.cuda()
        y_val = y_val.cuda()
    # clearing the gradients of the model parameters
    optimizer.zero_grad()
    torch.cuda.empty_cache()
    # prediction for the training and validation set
    output_train = model(x_train)
    output_val = model(x_val)

    # print(y_train)
    # computing the training and validation loss
    loss_train = criterion(output_train, y_train)
    loss_val = criterion(output_val, y_val)
    # store plain floats so plotting works and the graph is not kept alive
    train_losses.append(loss_train.item())
    val_losses.append(loss_val.item())

    # computing the updated weights of all the model parameters
    loss_train.backward()
    optimizer.step()
    tr_loss = loss_train.item()
    if epoch % 2 == 0:
        # printing the validation loss
        print('Epoch : ', epoch + 1, '\t', 'loss :', loss_val.item())

    # save a checkpoint every 10 epochs
    if (epoch + 1) % 10 == 0:
        checkpoint = {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'loss': loss_train.item()
        }
        save_ckp(checkpoint, False, 'data/checkpoint/', '')


def save_ckp(state, is_best, checkpoint_dir, best_model_dir):
    f_path = checkpoint_dir + 'checkpoint.pt'
    torch.save(state, f_path)
    if is_best:
        best_fpath = best_model_dir + 'best_model.pt'
        shutil.copyfile(f_path, best_fpath)

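
# A minimal companion loader for the checkpoints written by save_ckp above.
# This helper is not part of the original paste; it assumes the same dict
# layout ('epoch', 'state_dict', 'optimizer', 'loss') used when saving.
def load_ckp(checkpoint_path, model, optimizer):
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    # return the epoch to resume training from
    return model, optimizer, checkpoint['epoch']
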

if __name__ == "__main__":
    # empty list to store training losses
    train_losses = []
    # empty list to store validation losses
    val_losses = []
    # training the model
    num_batches = (xtrain.shape[0] + batch_size - 1) // batch_size
    for epoch in range(num_epoch):
        print(epoch)
        for i in range(num_batches):
            # local batches and labels
            local_X = xtrain[i * batch_size:(i + 1) * batch_size]
            local_y = ytrain[i * batch_size:(i + 1) * batch_size]
            if local_X.shape[0] != 0:
                train(epoch, local_X, local_y)
        print(str(epoch) + "----")

    plt.plot(train_losses, label='Training loss')
    plt.plot(val_losses, label='Validation loss')
    plt.legend()
    plt.show()
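
    # A rough accuracy check on the held-out split (not in the original paste);
    # it assumes ActionNet returns one logit per class, as CrossEntropyLoss expects.
    model.eval()
    with torch.no_grad():
        x_val = xtest.cuda() if torch.cuda.is_available() else xtest
        preds = model(x_val).argmax(dim=1).cpu()
        accuracy = (preds == ytest).float().mean().item()
        print('Validation accuracy:', accuracy)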