Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
import torch
from torch.nn import Parameter  # unused in this chunk; kept in case a later part of the file needs it
from torch.autograd import Variable  # deprecated since PyTorch 0.4; no longer needed below
import torch.nn.functional as F

# Demo: run a 1-in/1-out RNNCell over a 3-step sequence and compare the
# hidden states against hand-computed tanh values.
# Fixed seed so the hard-coded numbers in the manual checks stay reproducible.
torch.manual_seed(9)

#### create RNN cell ###########
rnn_a = torch.nn.RNNCell(input_size=1, hidden_size=1, bias=False)

#### Print all parameters in the RNNCell ###########
# With bias=False this is just weight_ih and weight_hh, each of shape (1, 1).
for weight in rnn_a.parameters():
    print(weight)

#### Create input (batch, sequence, feature vector) ###########
# Plain tensor instead of the deprecated Variable wrapper; requires_grad
# defaults to False, so behavior is unchanged.
random_input = torch.FloatTensor(1, 3, 1).normal_()  # batch, sequence, feature
random_input[0, 0, 0] = 1  # pin the first time step to 1 so the manual math is easy to follow
h0 = torch.zeros(1, 1)  # initial hidden state (batch, hidden_size)
print(random_input)
print(h0)

#### Show hidden value at time step 1, compared with a manual calculation ###########
h1 = rnn_a(random_input[:, 0, :], h0)
print(h1)
# NOTE(review): 0.3116 is presumably the seed-9 value of weight_ih; since the
# input at step 0 is 1 and h0 is 0, h1 = tanh(weight_ih). Re-check if the seed changes.
print("ht = tanh(0.3116) = ", torch.tanh(torch.tensor(0.3116)))

#### Show hidden value at time step 2, compared with a manual calculation ###########
h2 = rnn_a(random_input[:, 1, :], h1)
print(h2)
print("Manual calculate")
# NOTE(review): hard-coded seed-9 values — h2 = tanh(h1 * weight_hh + x1 * weight_ih).
print(torch.tanh(torch.tensor((0.3116) * (-0.7775) + (-0.3960) * (0.3019))))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement