import numpy as np
import torch

self.optimizer = optimizer(self.model.parameters(), lr=self.learning_rate)  # in the class __init__
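
# add_noise_batch is called below but is not defined in this paste. A minimal
# sketch of what it might do, assuming it perturbs each landmark with
# zero-mean Gaussian jitter so the model learns to map noisy landmarks back
# to clean ones; the noise scale (sigma=0.01) is an assumption, not taken
# from the original code.
def add_noise_batch(landmark_batch, sigma=0.01):
    """Return a copy of the (batch, n_points, 2) landmark array with
    Gaussian noise added."""
    noise = np.random.normal(0.0, sigma, size=landmark_batch.shape)
    return landmark_batch + noise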
# Training
self.model.train()
train_loss = 0.0
for iteration, batch in enumerate(train_set):
    # Prepare inputs and targets: replace NaNs, add noise, then flatten each
    # (n_points, 2) landmark array into a single vector (all x coords, then all y coords).
    landmark_batch = np.nan_to_num(batch[2].numpy())
    noisy_landmark_batch = add_noise_batch(landmark_batch)
    landmark_batch = np.array([np.concatenate((l[:, 0], l[:, 1])) for l in landmark_batch])
    noisy_landmark_batch = np.array([np.concatenate((l[:, 0], l[:, 1])) for l in noisy_landmark_batch])
    inputs = torch.from_numpy(noisy_landmark_batch).float()
    targets = torch.from_numpy(landmark_batch).float()

    self.optimizer.zero_grad()  # reset gradients; otherwise they accumulate across iterations
    outputs = self.model(inputs)
    loss = self.loss_criterion(outputs, targets)
    train_loss += loss.item()
    loss.backward()
    self.optimizer.step()
# Testing
self.model.eval()
test_loss = 0.0
with torch.no_grad():  # no gradients needed during evaluation
    for iteration, batch in enumerate(test_set):
        # Same preparation as in training: replace NaNs, add noise, flatten.
        landmark_batch = np.nan_to_num(batch[2].numpy())
        noisy_landmark_batch = add_noise_batch(landmark_batch)
        landmark_batch = np.array([np.concatenate((l[:, 0], l[:, 1])) for l in landmark_batch])
        noisy_landmark_batch = np.array([np.concatenate((l[:, 0], l[:, 1])) for l in noisy_landmark_batch])
        test_input = torch.from_numpy(noisy_landmark_batch).float()
        target = torch.from_numpy(landmark_batch).float()

        outputs = self.model(test_input)
        loss = self.loss_criterion(outputs, target)
        test_loss += loss.item()