from numpy import genfromtxt
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.utils.data as Data
import os
import copy  # needed to snapshot the best model weights below

# Hyper Parameters
input_size = 63
hidden_size1 = 512
hidden_size2 = 512
num_classes = 2
num_epochs = 256
batch_size = 128
learning_rate = 0.001

data = genfromtxt('input.csv', delimiter=',')
labels = genfromtxt('output.csv', delimiter=',')

input_data = data[:4500]
test_input = data[4500:]
output_data = labels[:4500]
test_output = labels[4500:]

# Wrap the numpy arrays as tensor datasets: features as float, labels as long
train_dataset = Data.TensorDataset(torch.from_numpy(input_data).float(), torch.from_numpy(output_data).long())
test_dataset = Data.TensorDataset(torch.from_numpy(test_input).float(), torch.from_numpy(test_output).long())
print(torch.from_numpy(output_data).int())  # sanity check on the label tensor
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

# Dictionary holding the training and validation loaders
dataloaders = dict()
dataloaders['train'] = train_loader
dataloaders['val'] = test_loader

dataset_sizes = {'train': 4500, 'val': 550}
use_gpu = torch.cuda.is_available()

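# Illustrative check (not part of the original paste): each training batch should
# be a (batch_size, input_size) float tensor of features plus a (batch_size,)
# long tensor of class labels.
_features, _targets = next(iter(dataloaders['train']))
print(_features.size(), _targets.size())
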
class Net(nn.Module):
    def __init__(self, input_size, hidden_size1, hidden_size2, num_classes):
        super(Net, self).__init__()
        self.fc1 = nn.Sequential(
            nn.Linear(input_size, hidden_size1),
            nn.ReLU())
        self.fc2 = nn.Sequential(
            nn.Linear(hidden_size1, hidden_size2),
            nn.ReLU())
        self.fc3 = nn.Sequential(
            nn.Linear(hidden_size2, num_classes))

    def forward(self, x):
        out = self.fc1(x)
        out = self.fc2(out)
        out = self.fc3(out)
        return out

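# Quick sanity check (not part of the original paste): the network should map a
# (batch, input_size) float tensor to (batch, num_classes) logits.
_check_net = Net(input_size, hidden_size1, hidden_size2, num_classes)
_check_out = _check_net(Variable(torch.randn(4, input_size)))
assert _check_out.size() == (4, num_classes)
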
def train_model(model, criterion, optimizer, num_epochs):
    f = open("Iterations.txt", "w+")
    best_model_wts = copy.deepcopy(model.state_dict())  # snapshot, not a live reference
    best_val_acc = 0.0
    best_train_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train(True)  # Set model to training mode
            else:
                model.train(False)  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for data in dataloaders[phase]:
                # get the inputs
                inputs, label = data
                # wrap them in Variable
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(label.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(label)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)
                # backward + optimize only if in training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                # statistics (loss.data[0] is the pre-0.4 API; use loss.item() on newer PyTorch)
                running_loss += loss.data[0]
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]
            # Print loss and accuracy for this phase and log them to Iterations.txt
            print('{} Loss: {:.8f} Accuracy: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            f.write('{} Loss: {:.8f} Accuracy: {:.4f}\n'.format(phase, epoch_loss, epoch_acc))
            # deep copy the best weights seen so far
            if phase == 'val' and epoch_acc > best_val_acc:
                best_val_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'train' and epoch_acc > best_train_acc:
                best_train_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
    f.close()
    print('Best val Acc: {:.4f}'.format(best_val_acc))
    model.load_state_dict(best_model_wts)
    return model, best_train_acc, best_val_acc


net = Net(input_size, hidden_size1, hidden_size2, num_classes)
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
if use_gpu:
    model_ft, train_acc, test_acc = train_model(net.cuda(), criterion, optimizer, num_epochs)
else:
    model_ft, train_acc, test_acc = train_model(net, criterion, optimizer, num_epochs)

torch.save(model_ft.state_dict(), 'yas.pkl')
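
# Minimal inference sketch (not part of the original paste), assuming the same
# Net definition and hyperparameters as above. 'new_input.csv' is a hypothetical
# file with the same 63-column layout as input.csv; the block is skipped if the
# file is absent.
if os.path.exists('new_input.csv'):
    restored_net = Net(input_size, hidden_size1, hidden_size2, num_classes)
    restored_net.load_state_dict(torch.load('yas.pkl', map_location=lambda storage, loc: storage))
    restored_net.eval()  # switch off training-specific behaviour
    new_data = genfromtxt('new_input.csv', delimiter=',')  # assumed to hold rows of 63 features
    inputs = Variable(torch.from_numpy(new_data).float())
    outputs = restored_net(inputs)
    _, predicted = torch.max(outputs.data, 1)  # predicted class index per row
    print(predicted)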