import math

import torch
import torch.nn as nn


class SparseLinear(nn.Module):
    """Linear layer that takes a sparse COO tensor as input.

    The weight is stored as (in_features, out_features), i.e. transposed
    relative to nn.Linear, so torch.sparse.mm(input, weight) needs no
    transpose in forward().
    """

    def __init__(self, in_features, out_features, bias=True):
        super(SparseLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # torch.empty replaces the deprecated torch.Tensor(...) constructor;
        # values are filled in by reset_parameters().
        self.weight = nn.Parameter(torch.empty(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.empty(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Mirror nn.Linear's initialization. Because the weight here is
        # transposed relative to nn.Linear, the fan-in dimension is dim 0,
        # so use mode='fan_out' and in_features directly for the bias bound.
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5), mode='fan_out')
        if self.bias is not None:
            bound = 1 / math.sqrt(self.in_features)
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, input_sparse_tensor):
        # torch.sparse.mm supports autograd for both its sparse and
        # dense arguments.
        output = torch.sparse.mm(input_sparse_tensor, self.weight)
        # Guard the bias add: self.bias is None when bias=False.
        if self.bias is not None:
            output = output + self.bias
        return output

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )


if __name__ == '__main__':
    # Build a 2x3 sparse COO tensor with three non-zero entries;
    # torch.sparse_coo_tensor replaces the deprecated
    # torch.sparse.FloatTensor constructor.
    i = torch.tensor([[0, 1, 1],
                      [2, 0, 2]])
    v = torch.tensor([3.0, 4.0, 5.0])
    input_tensor = torch.sparse_coo_tensor(i, v, (2, 3)).requires_grad_(True)

    layer = SparseLinear(3, 5)
    output_tensor = layer(input_tensor)
    print('Size: {}'.format(output_tensor.size()))

    # Sum to a scalar and backpropagate; the gradient with respect to
    # the sparse input is itself a sparse tensor.
    target = output_tensor.sum()
    target.backward()
    print('Gradients: {}'.format(input_tensor.grad))
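
    # A minimal sanity check, not part of the original paste: under the
    # weight layout assumed above, the sparse forward pass should agree
    # with an ordinary dense matmul against the same parameters.
    dense_output = input_tensor.detach().to_dense() @ layer.weight + layer.bias
    assert torch.allclose(output_tensor, dense_output)
    print('Matches dense computation: OK')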