Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
def __init__(self):
    """Build the two-branch network: a 1-D conv stack plus an FC head.

    NOTE(review): `n_input_channels` is a module-level name defined outside
    this chunk — confirm it is bound before the class is instantiated.
    """
    super(Net, self).__init__()
    # padding=4 with kernel_size=9 keeps the sequence length unchanged
    # after conv1 ("same" padding for an odd kernel).
    self.conv1 = nn.Conv1d(in_channels=n_input_channels, out_channels=40,
                           kernel_size=9, padding=4)
    self.conv2 = nn.Conv1d(in_channels=40, out_channels=80, kernel_size=3)
    self.pool1 = nn.MaxPool1d(3)
    # FC head: 2484 = flattened conv2 output plus the 4 raw positions per
    # channel concatenated in forward() — assumes a fixed input length
    # (TODO confirm against the data pipeline).
    self.fc1 = nn.Linear(2484, 200)
    self.fc2 = nn.Linear(200, 200)
    self.fc3 = nn.Linear(200, 200)
    self.fc4 = nn.Linear(200, 99)
def forward(self, x):
    """Run the forward pass.

    Assumes x is (batch, channels, length); the first 4 positions along
    the last axis bypass the conv stack and feed the FC head directly —
    TODO confirm this layout against the caller.
    """
    # Raw-feature branch: first 4 positions per channel, flattened.
    x2 = x[:, :, :4].contiguous().view(x.size(0), -1)
    # Conv branch: everything after position 4.
    x1 = x[:, :, 4:]
    x1 = F.relu(self.conv1(x1))
    x1 = self.pool1(x1)
    x1 = F.relu(self.conv2(x1))
    # BUG FIX: the original line was missing its closing parenthesis.
    x1 = x1.view(x1.size(0), -1)
    # Concatenate both branches along the feature axis and run the head.
    x = torch.cat((x1, x2), 1)
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    x = F.relu(self.fc3(x))
    # Softplus keeps all 99 outputs strictly positive.
    x = F.softplus(self.fc4(x))
    return x
def init_weights(m):
    """Weight-init hook for ``Module.apply``: Xavier-init Linear layers.

    Sets Linear weights with ``xavier_uniform_`` and biases to the constant
    0.01; leaves every other layer type untouched.
    """
    # isinstance is the idiomatic type check (original used type(m) == ...).
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        # Guard: Linear(..., bias=False) layers have no bias tensor.
        if m.bias is not None:
            m.bias.data.fill_(0.01)
# Instantiate the network and apply the custom weight initialisation to
# every submodule (Module.apply walks the module tree recursively).
net = Net()
net.apply(init_weights)
Add Comment
Please sign in to add a comment.