# First, we extend the VGG16 model with our new classifier.
# Imports assumed by the rest of this paste:
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torch.utils.data import Dataset
from torchvision import models

e2e_model = models.vgg16(pretrained=True)
e2e_model.train()
# Replace the final 1000-way ImageNet layer with an 80-category head
classifier = list(e2e_model.classifier.children())[:-1]
classifier.append(torch.nn.Linear(4096, 512))
classifier.append(torch.nn.Sigmoid())
classifier.append(torch.nn.Linear(512, 80))
# A second sigmoid layer is dropped as multilabel_soft_margin_loss applies it internally
e2e_model.classifier = nn.Sequential(*classifier)
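
# The `loader` transform used by TrainData below is never defined in this
# paste. A plausible sketch, assuming the standard ImageNet preprocessing
# that VGG16's pretrained weights expect (224x224 crops, ImageNet mean/std):
from torchvision import transforms

loader = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])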
# Each training example's label is an 80-dimensional binary vector
# indicating which categories appear in the image
class TrainData(Dataset):
    def __init__(self):
        # train_ids, train_id_to_categories, category_to_idx and
        # train_id_to_file are assumed to be defined earlier in the script
        self.__xs = []
        self.__ys = []
        for train_id in train_ids:
            # Create the 80-dimensional multi-hot label vector
            y_vector = np.zeros(80)
            for category in train_id_to_categories[train_id]:
                idx = category_to_idx[category]
                y_vector[idx] = 1
            self.__xs.append(train_id)
            self.__ys.append(y_vector)

    def __getitem__(self, index):
        image_id = self.__xs[index]
        labels = self.__ys[index]
        # Load and return the tensor representation of the image with its labels
        image = Image.open(train_id_to_file[image_id]).convert('RGB')
        image_tensor = loader(image).float()
        return image_tensor, labels

    def __len__(self):
        return len(self.__xs)
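
# A quick sanity check of the Dataset (a sketch; assumes the COCO metadata
# dictionaries above are populated): one sample should yield a 3x224x224
# image tensor and an 80-dimensional multi-hot label vector.
sample_image, sample_labels = TrainData()[0]
print(sample_image.shape)   # expected: torch.Size([3, 224, 224])
print(sample_labels.shape)  # expected: (80,)
print(sample_labels.sum())  # number of categories present in this image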
def train(model, learning_rate=0.0001, batch_size=50, epochs=1):
    """
    Training function which takes as input a model, a learning rate and a batch size.
    After completing a full pass over the data, the function exits, and the input
    model will be trained.
    """
    train_data = TrainData()
    train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                               batch_size=batch_size,
                                               shuffle=True)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    model.train()
    for epoch in range(epochs):
        for batch_idx, (data, target) in enumerate(train_loader):
            # Get the inputs and move them to the GPU
            target = target.type(torch.FloatTensor)
            data, target = data.cuda(), target.cuda()
            # Zero the parameter gradients
            optimizer.zero_grad()
            # Forward + backward + optimize
            output = model(data)
            loss = F.multilabel_soft_margin_loss(output, target)
            loss.backward()
            optimizer.step()
            # Print statistics every 25 batches
            if batch_idx % 25 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch + 1, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
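
# Since the model outputs raw logits (the sigmoid lives inside the loss),
# inference needs an explicit torch.sigmoid plus a threshold on the
# per-category probabilities. A minimal sketch; `predict` and the 0.5
# cutoff are illustrative additions, not part of the original paste:
def predict(model, image_tensor, threshold=0.5):
    model.eval()
    with torch.no_grad():
        logits = model(image_tensor.unsqueeze(0).cuda())
        probs = torch.sigmoid(logits).squeeze(0)
    # Indices of all categories whose probability clears the threshold
    return (probs > threshold).nonzero(as_tuple=True)[0].tolist()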
# Finally, train the model
start = time.time()
train(e2e_model.cuda())
# Print the elapsed time in seconds
end = time.time()
print(str(end - start) + " seconds elapsed")
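
# Optionally persist the fine-tuned weights so the run doesn't need to be
# repeated; a sketch with a hypothetical filename:
torch.save(e2e_model.state_dict(), 'e2e_vgg16_multilabel.pth')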