Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def __init__(self, bn=False):
    """Build the VGG16-style backbone plus a class-activation GAP head.

    Args:
        bn: when True, every ConvReLU block inserts BatchNorm after its conv.
    """
    super(WSDR, self).__init__()

    def _stage(channels, pool=True):
        # One VGG stage: a chain of 3x3 same-padding ConvReLU blocks over
        # consecutive channel pairs, optionally closed by a 2x2 max-pool.
        blocks = [ConvReLU(cin, cout, 3, pd=True, bn=bn)
                  for cin, cout in zip(channels, channels[1:])]
        if pool:
            blocks.append(nn.MaxPool2d(2))
        return nn.Sequential(*blocks)

    # Five convolutional stages mirroring VGG16 (conv5 keeps full resolution).
    self.conv1 = _stage([3, 64, 64])
    self.conv2 = _stage([64, 128, 128])
    self.conv3 = _stage([128, 256, 256, 256])
    self.conv4 = _stage([256, 512, 512, 512])
    self.conv5 = _stage([512, 512, 512, 512], pool=False)
    # Head: widen to 1024, project to 20 class maps, then global average
    # pool (14x14 assumes 224x224 input — TODO confirm against the loader).
    self.gap = nn.Sequential(ConvReLU(512, 1024, 3, pd=True, bn=bn),
                             ConvReLU(1024, 20, 3, pd=True, bn=bn),
                             nn.AvgPool2d(kernel_size=14, stride=14))
    # NOTE(review): author observed that adding a trailing fc layer
    # (nn.Linear(num_classes, num_classes)) "makes things go right".
def forward(self, im_data):
    """Run the backbone and return per-class scores.

    Args:
        im_data: input image batch; presumably (N, 3, 224, 224) so the
            14x14 average pool reduces each class map to 1x1 — TODO confirm.

    Returns:
        Tensor of shape (N, 20), one score per class per image.
    """
    x = self.conv1(im_data)
    x = self.conv2(x)
    x = self.conv3(x)
    x = self.conv4(x)
    conv5features = self.conv5(x)
    gap = self.gap(conv5features)
    # BUG FIX: the original `gap.squeeze()` removed *all* singleton dims,
    # so a batch of size 1 collapsed from (1, 20, 1, 1) to (20,), breaking
    # losses that expect (N, C). Flatten only the non-batch dims instead.
    scores = gap.view(gap.size(0), -1)
    # scores = self.fc(scores)
    return scores
class ConvReLU(nn.Module):
    """A Conv2d followed by optional BatchNorm and optional ReLU.

    Args:
        in_ch: number of input channels.
        out_ch: number of output channels.
        kernel_sz: square convolution kernel size.
        stride: convolution stride.
        relu: append a ReLU activation when True.
        pd: pad so spatial size is preserved (at stride 1) when True.
        bn: insert BatchNorm2d between the conv and the ReLU when True.
    """

    def __init__(self, in_ch, out_ch, kernel_sz, stride=1, relu=True, pd=True, bn=False):
        super(ConvReLU, self).__init__()
        # "Same" padding for odd kernels; zero padding otherwise.
        pad = (kernel_sz - 1) // 2 if pd else 0
        self.conv = nn.Conv2d(in_ch, out_ch, kernel_sz, stride, padding=pad)
        # NOTE(review): momentum=0 freezes the running BN statistics at their
        # initial values — looks deliberate (caffe-style), verify intent.
        self.bn = nn.BatchNorm2d(out_ch, eps=0.001, momentum=0, affine=True) if bn else None
        self.relu = nn.ReLU(inplace=True) if relu else None

    def forward(self, x):
        # Apply the conv, then whichever optional stages were configured.
        for stage in (self.conv, self.bn, self.relu):
            if stage is not None:
                x = stage(x)
        return x
# compute output
# Forward pass of the model defined above; input_var is the image batch
# and target_var the multi-hot label tensor — presumably shape (N, 20)
# to match the 20 class scores; TODO confirm against the data loader.
output = model(input_var)  # the above model, input_var:image data
loss = F.multilabel_soft_margin_loss(output, target_var)
# compute gradient and do SGD step
optimizer.zero_grad()  # clear gradients accumulated from the previous step
loss.backward()  # backpropagate through the whole network
optimizer.step()  # apply the parameter update
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement