Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import torch
from torch import nn

# Report the installed torch version up front so runs are easy to reproduce/debug.
print("torch Version:", torch.__version__)
class ConvolutionalBlock(nn.Module):
    """Conv2d -> optional BatchNorm2d -> optional activation.

    The activation is LeakyReLU(0.2) for discriminator blocks and a
    per-channel PReLU otherwise (SRGAN convention).

    Args:
        input_channels: number of input feature channels.
        output_channels: number of output feature channels.
        discriminator: use LeakyReLU instead of PReLU when True.
        use_activation: apply the activation at all when True.
        use_batch_normalization: insert BatchNorm2d after the conv when True.
        **kwargs: forwarded to nn.Conv2d (kernel_size, stride, padding, ...).
    """

    def __init__(self, input_channels, output_channels, discriminator=False, use_activation=True,
                 use_batch_normalization=True, **kwargs):
        super().__init__()
        # The conv bias is redundant when batch norm follows (BN has its own shift).
        self.cnn = nn.Conv2d(input_channels, output_channels, **kwargs, bias=not use_batch_normalization)
        self.batch_normalization = nn.BatchNorm2d(output_channels) if use_batch_normalization else nn.Identity()
        # BUG FIX: the original assigned the boolean flag to self.use_activation and
        # then immediately overwrote it with the activation module, so
        # use_activation=False could never disable the activation. Keep the flag and
        # the module in separate attributes.
        self.use_activation = use_activation
        self.activation = (
            nn.LeakyReLU(0.2, inplace=True) if discriminator else nn.PReLU(num_parameters=output_channels)
        )

    def forward(self, x):
        out = self.batch_normalization(self.cnn(x))
        return self.activation(out) if self.use_activation else out
class UpsampleBlock(nn.Module):
    """Sub-pixel upsampling: 3x3 conv to C*s^2 channels, PixelShuffle, PReLU.

    Maps (N, C, H, W) -> (N, C, H*s, W*s) for scale factor s.
    """

    def __init__(self, input_channels, scale_factor):
        super().__init__()
        # BUG FIX: out_channels must be C * s**2 (the original computed
        # (C * s) ** 2), and the positional args were shifted: kernel_size=3,
        # stride=1, padding=1 keeps the spatial size unchanged so PixelShuffle
        # can rearrange the extra channels into (H*s, W*s).
        self.convolutional_layer = nn.Conv2d(input_channels, input_channels * scale_factor ** 2,
                                             kernel_size=3, stride=1, padding=1)
        # Input Channels * s^2, H, W --> Input Channels, H * s, W * s:
        self.pixel_shuffle = nn.PixelShuffle(scale_factor)
        # BUG FIX: forward referenced self.activate, which was never defined.
        self.activation = nn.PReLU(num_parameters=input_channels)

    def forward(self, x):
        return self.activation(self.pixel_shuffle(self.convolutional_layer(x)))
class ResidualBlock(nn.Module):
    """Two 3x3 convolutional blocks with an identity skip connection.

    The second block omits its activation so the sum with the identity
    branch is not pre-activated (standard SRGAN residual layout).
    """

    def __init__(self, input_channels):
        super().__init__()
        self.block1 = ConvolutionalBlock(input_channels, input_channels, kernel_size=3, stride=1, padding=1)
        self.block2 = ConvolutionalBlock(input_channels, input_channels, kernel_size=3, stride=1, padding=1,
                                         use_activation=False)

    def forward(self, x):
        # Residual branch, then add the identity shortcut.
        residual = self.block2(self.block1(x))
        return residual + x
class Generator(nn.Module):
    """SRGAN generator: initial 9x9 conv, residual trunk, global skip, 4x upsample.

    Maps (N, input_channels, H, W) -> (N, input_channels, 4*H, 4*W) with
    tanh-bounded output in [-1, 1].
    """

    def __init__(self, input_channels=3, number_of_channels=64, number_of_blocks=16):
        super().__init__()
        self.initialize = ConvolutionalBlock(input_channels, number_of_channels, kernel_size=9, stride=1, padding=4,
                                             use_batch_normalization=False)
        self.residual_layers = nn.Sequential(*[ResidualBlock(number_of_channels) for _ in range(number_of_blocks)])
        # BUG FIX: this conv sits after the residual trunk, so its input has
        # number_of_channels (64) channels, not input_channels (3) — the original
        # mismatch is what raised the runtime error in test(). The post-residual
        # conv in SRGAN is also a 3x3, not a 9x9.
        self.convolutional_block = ConvolutionalBlock(number_of_channels, number_of_channels, kernel_size=3,
                                                      stride=1, padding=1, use_activation=False)
        # Two x2 sub-pixel blocks give the overall x4 super-resolution factor.
        self.upsamples = nn.Sequential(UpsampleBlock(number_of_channels, 2), UpsampleBlock(number_of_channels, 2))
        self.final_layer = nn.Conv2d(number_of_channels, input_channels, kernel_size=9, stride=1, padding=4)

    def forward(self, x):
        initial = self.initialize(x)
        x = self.residual_layers(initial)
        x = self.convolutional_block(x) + initial  # global skip connection
        x = self.upsamples(x)
        return torch.tanh(self.final_layer(x))
class Discriminator(nn.Module):
    """SRGAN discriminator: strided conv feature stack + MLP head emitting one logit.

    Strides alternate 1, 2, 1, 2, ... so every second block halves the spatial
    size; AdaptiveAvgPool2d makes the head input-size independent.
    """

    # NOTE: the default is a tuple (not a list) to avoid a shared mutable default;
    # it is only iterated, so this is backward compatible.
    def __init__(self, input_channels=3, features=(64, 64, 128, 128, 256, 256, 512, 512)):
        super().__init__()
        blocks = []
        for idx, feature in enumerate(features):
            blocks.append(
                ConvolutionalBlock(
                    input_channels, feature, kernel_size=3, stride=1 + (idx % 2), padding=1, discriminator=True,
                    use_activation=True, use_batch_normalization=idx != 0,  # no BN on the very first block
                )
            )
            input_channels = feature
        self.blocks = nn.Sequential(*blocks)
        # BUG FIX: the original final layer was nn.Linear(1024 * 1), which passes a
        # single positional argument (in_features only) and raises a TypeError; the
        # head must map 1024 features to a single real/fake logit.
        self.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d((6, 6)),
            nn.Flatten(),
            nn.Linear(512 * 6 * 6, 1024),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(1024, 1),
        )

    def forward(self, x):
        return self.classifier(self.blocks(x))
def test():
    """Smoke test: push one batch through Generator and Discriminator, print shapes.

    Expected: generator output (5, 3, 96, 96) for 24px input (x4 upscale);
    discriminator output (5, 1).
    """
    low_resolution = 24
    # torch.cuda.amp.autocast warns (and is a no-op) on CPU-only machines; use the
    # device-agnostic torch.autocast with the device that is actually available.
    device_type = "cuda" if torch.cuda.is_available() else "cpu"
    with torch.autocast(device_type=device_type):
        x = torch.randn(5, 3, low_resolution, low_resolution)
        generator = Generator()
        generator_output = generator(x)
        discriminator = Discriminator()
        discriminator_output = discriminator(generator_output)
        print("Generator Output Shape: {}".format(generator_output.shape) + "\n" +
              "Discriminator Output Shape: {}".format(discriminator_output.shape))


# Guard the entry point so importing this module does not trigger the smoke test.
if __name__ == "__main__":
    test()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement