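# Frame-level action recognition training script.
# Loads exported video frames for two classes ('ambil' / 'simpan'), makes a
# stratified 80/20 train/test split, and trains an ActionNet classifier with
# Adam + cross-entropy, saving checkpoints and plotting both loss curves.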
import torch
import shutil
import os
import numpy as np
import imageio
import matplotlib.pyplot as plt
import cv2
from torch.optim import Adam
from torch.nn import CrossEntropyLoss
from vision.nn.action_recognition import ActionNet
from sklearn.model_selection import StratifiedShuffleSplit
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'  # synchronous CUDA launches for easier debugging

init_lr = 0.0001
num_epoch = 1000
batch_size = 4
resize_size = (32, 32)
dataset_path = 'data/video_dataset/'

# class label mapping ('ambil' = take, 'simpan' = put away)
class_list = {
    'ambil': 0,
    'simpan': 1
}
train_img = []
class_img = []

# LOADING TRAINING DATA
for clss in class_list.keys():
    img_path = dataset_path + clss + '/export/'
    list_img = os.listdir(img_path)
    for img_name in list_img:
        # frame filenames encode the class in their first character
        if img_name[0] == 'a':
            class_name = 'ambil'
        elif img_name[0] == 's':
            class_name = 'simpan'
        else:
            # skip files that match neither class prefix
            continue
        load_img_path = img_path + img_name
        # reading the frame as a single-channel (grayscale) image
        img = imageio.imread(load_img_path, as_gray=True)
        # resizing the frame to the model's input size
        img = cv2.resize(img, resize_size)
        train_img.append(img)
        class_img.append(class_list[class_name])
# converting the list to a numpy array
train_x = np.array(train_img)
# defining the target
train_y = np.array(class_img)

# stratified 80/20 train/test split
dataset_split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
dataset_split.get_n_splits(train_x, train_y)
for train_index, test_index in dataset_split.split(train_x, train_y):
    # print("TRAIN:", train_index, "TEST:", test_index)
    xtrain, xtest = train_x[train_index], train_x[test_index]
    ytrain, ytest = train_y[train_index], train_y[test_index]
# convert to float32 tensors (the model's weights are float32) and add a
# channel dimension
xtrain = torch.from_numpy(xtrain).float()
xtest = torch.from_numpy(xtest).float()
xtrain = xtrain.unsqueeze(1)
xtest = xtest.unsqueeze(1)
# CrossEntropyLoss expects int64 class-index targets
ytrain = torch.from_numpy(ytrain).type(torch.int64)
ytest = torch.from_numpy(ytest).type(torch.int64)
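# At this point xtrain/xtest are (N, 1, 32, 32) float32 frame tensors and
# ytrain/ytest are (N,) int64 label vectors.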
# defining the model
model = ActionNet(num_class=len(class_list.keys()))
# defining the optimizer
optimizer = Adam(model.parameters(), lr=init_lr)
# defining the loss function
criterion = CrossEntropyLoss()

# moving the model and loss to GPU if available
if torch.cuda.is_available():
    model = model.cuda()
    criterion = criterion.cuda()
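# train() below performs one optimization step on the given batch (X, Y) and,
# on every call, also evaluates the full held-out split (xtest, ytest), so the
# two loss curves plotted at the end stay aligned step for step.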
def train(epoch, X, Y):
    model.train()
    # training batch and held-out validation set
    x_train, y_train = X, Y
    x_val, y_val = xtest, ytest
    # converting the data into GPU format
    if torch.cuda.is_available():
        x_train = x_train.cuda()
        y_train = y_train.cuda()
        x_val = x_val.cuda()
        y_val = y_val.cuda()
    # clearing the gradients of the model parameters
    optimizer.zero_grad()
    torch.cuda.empty_cache()
    # prediction and loss for the training batch
    output_train = model(x_train)
    loss_train = criterion(output_train, y_train)
    # prediction and loss for the validation set (no graph needed)
    with torch.no_grad():
        output_val = model(x_val)
        loss_val = criterion(output_val, y_val)
    # store plain floats rather than graph-holding tensors
    train_losses.append(loss_train.item())
    val_losses.append(loss_val.item())
    # computing the updated weights of all the model parameters
    loss_train.backward()
    optimizer.step()
    if epoch % 2 == 0:
        # printing the validation loss
        print('Epoch :', epoch + 1, '\t', 'loss :', loss_val.item())
    # save a checkpoint every 10 epochs
    if epoch % 10 == 0:
        checkpoint = {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'loss': loss_train.item()
        }
        save_ckp(checkpoint, False, 'data/checkpoint/', '')
def save_ckp(state, is_best, checkpoint_dir, best_model_dir):
    f_path = checkpoint_dir + 'checkpoint.pt'
    torch.save(state, f_path)
    if is_best:
        best_fpath = best_model_dir + 'best_model.pt'
        shutil.copyfile(f_path, best_fpath)
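# A minimal companion loader for save_ckp above (the helper name load_ckp is
# an assumption, not from the original paste). It restores the fields that
# the checkpoint dict in train() writes out, for resuming training.
def load_ckp(checkpoint_fpath, model, optimizer):
    checkpoint = torch.load(checkpoint_fpath)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    # return the epoch to resume from alongside the restored objects
    return model, optimizer, checkpoint['epoch']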
if __name__ == "__main__":
    # empty lists to store training and validation losses
    train_losses = []
    val_losses = []
    # training the model, iterating over every batch each epoch
    num_batches = (len(xtrain) + batch_size - 1) // batch_size
    for epoch in range(num_epoch):
        for i in range(num_batches):
            # local batch and labels
            local_X = xtrain[i * batch_size:(i + 1) * batch_size]
            local_y = ytrain[i * batch_size:(i + 1) * batch_size]
            if local_X.shape[0] != 0:
                train(epoch, local_X, local_y)
        print(str(epoch) + "----")
    plt.plot(train_losses, label='Training loss')
    plt.plot(val_losses, label='Validation loss')
    plt.legend()
    plt.show()