Advertisement
Not a member of Pastebin yet?
Sign Up — it unlocks many cool features!
################################################################################
# TODO:                                                                        #
# Experiment with any architectures, optimizers, and hyperparameters.          #
# Achieve AT LEAST 70% accuracy on the *validation set* within 10 epochs.      #
#                                                                              #
# Note that you can use the check_accuracy function to evaluate on either      #
# the test set or the validation set, by passing either loader_test or         #
# loader_val as the second argument to check_accuracy. You should not touch    #
# the test set until you have finished your architecture and hyperparameter    #
# tuning, and only run the test set once at the end to report a final value.   #
################################################################################
model = None
optimizer = None
epochs = 10  # assignment budget: hit the accuracy target within 10 epochs
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Five conv stages, each: BatchNorm -> 3x3 conv (padding=1 preserves H, W)
# -> PReLU -> light Dropout2d -> 2x2 max-pool (halves H and W).
channel_0 = 3    # RGB input; CIFAR-10 images are 3 x 32 x 32
channel_1 = 36
channel_2 = 66
channel_3 = 126
channel_4 = 254
channel_5 = 254
# After five 2x2 max-pools a 32x32 input is reduced to 1x1, so the flattened
# feature vector has channel_5 * 1 * 1 elements.  (The original code computed
# channel_5 * 32 * 32 here, which is wrong; it happened to be unused because
# the classifier head hard-coded channel_5.  Fixed and wired in below.)
out_conv_size = channel_5 * 1 * 1
classes = 10            # CIFAR-10 has 10 output classes
learning_rate = 5e-4
hidden_layer_size = 200
model = nn.Sequential(
    nn.BatchNorm2d(channel_0),
    nn.Conv2d(channel_0, channel_1, kernel_size=3, padding=1),
    nn.PReLU(channel_1),
    nn.Dropout2d(p=0.05),
    nn.MaxPool2d(2),            # 32x32 -> 16x16
    nn.BatchNorm2d(channel_1),
    nn.Conv2d(channel_1, channel_2, kernel_size=3, padding=1),
    nn.PReLU(channel_2),
    nn.Dropout2d(p=0.05),
    nn.MaxPool2d(2),            # 16x16 -> 8x8
    nn.BatchNorm2d(channel_2),
    nn.Conv2d(channel_2, channel_3, kernel_size=3, padding=1),
    nn.PReLU(channel_3),
    nn.Dropout2d(p=0.05),
    nn.MaxPool2d(2),            # 8x8 -> 4x4
    nn.BatchNorm2d(channel_3),
    nn.Conv2d(channel_3, channel_4, kernel_size=3, padding=1),
    nn.PReLU(channel_4),
    nn.Dropout2d(p=0.05),
    nn.MaxPool2d(2),            # 4x4 -> 2x2
    nn.BatchNorm2d(channel_4),
    nn.Conv2d(channel_4, channel_5, kernel_size=3, padding=1),
    nn.PReLU(channel_5),
    nn.Dropout2d(p=0.05),
    nn.MaxPool2d(2),            # 2x2 -> 1x1
    Flatten(),                  # assignment-provided helper: (N, C, 1, 1) -> (N, C)
    nn.BatchNorm1d(out_conv_size),
    nn.Linear(out_conv_size, hidden_layer_size),
    nn.PReLU(hidden_layer_size),
    nn.BatchNorm1d(hidden_layer_size),
    nn.Linear(hidden_layer_size, classes),
)
# Nesterov SGD is a viable alternative:
# optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.4, nesterov=True)
# NOTE(review): betas=(0.5, 0.75) and weight_decay=0.05 are unusually aggressive
# settings for Adam — presumably tuned empirically for this assignment; confirm
# before reusing elsewhere.
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.05,
                       amsgrad=True, betas=(0.5, 0.75))
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
################################################################################
# END OF YOUR CODE
################################################################################
# You should get at least 70% accuracy
train_part34(model, optimizer, epochs=epochs)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement