# model.py ---
#
# Filename: model.py
# Description:
# Author: Kwang Moo Yi
# Maintainer:
# Created: Thu Jan 24 17:28:40 2019 (-0800)
# Version:
#

# Commentary:
#
#
#
#

# Change Log:
#
#
#
# Copyright (C), Visual Computing Group @ University of Victoria.

# Code:

import numpy as np
import torch
from torch import nn


class MyNetwork(nn.Module):
    """Network class."""

    def __init__(self, config, input_shp, mean=None, std=None):
        """Initialization of the model.

        Parameters
        ----------

        config:
            Configuration object that holds the command line arguments.

        input_shp: tuple or list
            Shape of each input data sample.

        mean: np.array
            Mean value to be used for data normalization. We will store this
            in a torch.nn.Parameter.

        std: np.array
            Std value to be used for data normalization. We will store this
            in a torch.nn.Parameter.
        """

        # Run initialization for super class
        super(MyNetwork, self).__init__()

        # Store configuration
        self.config = config

        # TODO (5 points): Create torch.Tensor for holding mean, std. We will
        # apply these later, and if nothing is given, we want to do nothing to
        # our data, i.e. mean should be set to zeros and std to ones. We also
        # want all tensors to be `float32`.
        if mean is None and std is None:
            mean_tensor = torch.zeros(input_shp, dtype=torch.float32)
            std_tensor = torch.ones(input_shp, dtype=torch.float32)
        elif mean is not None and std is not None:
            mean_tensor = torch.from_numpy(mean.astype(np.float32))
            std_tensor = torch.from_numpy(std.astype(np.float32))
        else:
            raise ValueError(
                "mean and std must either both be given or both be None.")

        # TODO (5 points): Wrap the created Tensors as parameters, so that we
        # can easily save and load them (we can later get a list of everything
        # inside the model by calling model.parameters()). Also make sure we
        # mark that these will not be updated by the optimizer by disabling
        # gradient computation for them.
        self.mean = nn.Parameter(mean_tensor, requires_grad=False)
        self.std = nn.Parameter(std_tensor, requires_grad=False)

        # If mean, std is provided, update values accordingly
        if mean is not None and std is not None:
            #
            # ALTHOUGH THIS IS NOT PART OF YOUR IMPLEMENTATION, SEE BELOW.
            #
            # Note here that we use [:] so that we actually assign the values,
            # not simply change the reference.
            self.mean[:] = torch.from_numpy(mean.astype(np.float32))
            self.std[:] = torch.from_numpy(std.astype(np.float32))

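        # Side note (not required by the assignment): if you do not want the
        # normalization constants to appear in model.parameters() at all, a
        # common alternative is self.register_buffer("mean", mean_tensor).
        # Buffers are also stored in the state_dict and moved along with the
        # model by model.to(device), but are never seen by the optimizer.
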
        # TODO (5 points): We'll create `config.num_hidden` linear layers,
        # each of which has `config.num_unit` outputs. We will also connect
        # them with ReLU activation functions (see torch.nn.ReLU). We will
        # procedurally generate them as class attributes according to the
        # configuration. The `setattr` Python builtin will be helpful here.
        # Note: this construction assumes config.num_hidden >= 2.
        self.num_hidden = config.num_hidden
        indim = input_shp[0]
        self.linear0 = nn.Linear(indim, config.num_unit)
        self.relu0 = nn.ReLU(inplace=True)

        for i in range(1, config.num_hidden - 1):
            setattr(self, "linear" + str(i),
                    nn.Linear(config.num_unit, config.num_unit))
            setattr(self, "relu" + str(i), nn.ReLU(inplace=True))

        # The final layer maps to the number of classes. We do not apply a
        # ReLU after it, so that the network outputs raw logits.
        setattr(self, "linear" + str(config.num_hidden - 1),
                nn.Linear(config.num_unit, config.num_class))

    def forward(self, x):
        """Forward pass for the model.

        Parameters
        ----------

        x: torch.Tensor
            Input data for the model to be applied. Note that this data is
            typically in the shape of BCHW or BC, where B is the number of
            elements in the batch, and C is the dimensionality of our
            features. H and W appear when we use raw images. In the current
            assignment, it will be of shape BC.

        Returns
        -------

        x: torch.Tensor
            We reuse the variable name, because it is often more convenient
            to do so. We first normalize the input, and then feed it to our
            linear layers by simply calling each layer as a function with the
            normalized x as argument.
        """

        # TODO (5 points): Normalize data.
        x = (x - self.mean) / self.std

        # TODO (5 points): Apply layers. One thing that could be helpful is
        # the `getattr` Python builtin, which is the opposite of `setattr`
        # above.
        for i in range(self.num_hidden):
            x = getattr(self, "linear" + str(i))(x)
            # Apply ReLU after every layer except the final (logit) layer.
            if i < self.num_hidden - 1:
                x = getattr(self, "relu" + str(i))(x)

        return x
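

# The procedural setattr/getattr construction above follows the assignment
# instructions. As an illustrative sketch (not part of the assignment), the
# same linear/ReLU stack could also be built with nn.Sequential, which
# registers sub-modules automatically; `build_mlp` below is a hypothetical
# helper, assuming the same config attributes used in this file.
def build_mlp(config, indim):
    """Build the same stack as MyNetwork's layers using nn.Sequential."""
    layers = [nn.Linear(indim, config.num_unit), nn.ReLU(inplace=True)]
    for _ in range(1, config.num_hidden - 1):
        layers += [nn.Linear(config.num_unit, config.num_unit),
                   nn.ReLU(inplace=True)]
    # Final layer outputs raw logits, matching MyNetwork.forward above.
    layers.append(nn.Linear(config.num_unit, config.num_class))
    return nn.Sequential(*layers)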


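# A minimal smoke test, assuming a config with `num_hidden`, `num_unit`, and
# `num_class` attributes; SimpleNamespace here is just a hypothetical
# stand-in for the real command-line configuration object.
if __name__ == "__main__":
    from types import SimpleNamespace

    config = SimpleNamespace(num_hidden=3, num_unit=64, num_class=10)
    mean = np.zeros(32, dtype=np.float32)
    std = np.ones(32, dtype=np.float32)
    model = MyNetwork(config, input_shp=(32,), mean=mean, std=std)
    out = model(torch.randn(8, 32))
    print(out.shape)  # expected: torch.Size([8, 10])
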
#
# model.py ends here