from __future__ import print_function
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms, models
import models_densenet
import sys
#from pytorch.models import *
# Training settings
parser = argparse.ArgumentParser(description='PyTorch CIFAR-100 Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                    help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                    help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--dropout', action='store_true', default=False,
                    help='enable dropout in the model')
parser.add_argument('--data_root', type=str, default='/data/pytorch/', help='path to data')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
# Standard CIFAR augmentation for training; the test set only gets
# ToTensor + Normalize so evaluation is deterministic.
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    normalize,
])
test_transform = transforms.Compose([
    transforms.ToTensor(),
    normalize,
])

trn_data = datasets.CIFAR100(args.data_root + '/cifar100/', train=True, download=True, transform=train_transform)
tst_data = datasets.CIFAR100(args.data_root + '/cifar100/', train=False, transform=test_transform)
#print(vars(trn_data))
train_loader = torch.utils.data.DataLoader(trn_data, batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(tst_data, batch_size=args.test_batch_size, shuffle=False, **kwargs)
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 50 epochs."""
    lr = args.lr * (0.1 ** (epoch // 50))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
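# Worked example of the schedule above: lr = args.lr * 0.1 ** (epoch // 50),
# so with the default --lr 0.01, epoch 10 -> 0.01, epoch 60 -> 0.001, epoch 120 -> 0.0001.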
#model = Cifar10Quick(dropout=args.dropout)
# torchvision's DenseNet-121; its forward() returns a single tensor of raw class logits.
model = models.densenet121(num_classes=100)
if args.cuda:
    model.cuda()

optimizer = optim.Adam(model.parameters(), lr=args.lr)

A = None
A_test = None
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.float().cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        # densenet121 returns raw logits, so use cross_entropy
        # (nll_loss expects log-probabilities).
        output = model(data)
        ce_loss = F.cross_entropy(output, target)
        total_loss = ce_loss
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), ce_loss.data[0]))
def test(epoch):
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.float().cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.cross_entropy(output, target).data[0]
        pred = output.data.max(1)[1]  # get the index of the max logit
        correct += pred.eq(target.data).cpu().sum()
    test_loss /= len(test_loader)  # loss function already averages over batch size
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
for epoch in range(1, args.epochs + 1):
    train(epoch)
    adjust_learning_rate(optimizer, epoch)
    test(epoch)
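# Example invocation (the script filename below is hypothetical; point --data_root
# at the directory where torchvision should download/find CIFAR-100):
#   python train_densenet_cifar100.py --epochs 150 --lr 0.001 --batch-size 64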