from keras.layers import RepeatVector, Concatenate, Dense, Activation, Dot, Subtract

def one_step_attention(self, a, s_prev, t0):
    repeator = RepeatVector(Tx)  # Tx (input sequence length) is assumed to be defined in the enclosing scope
    concatenator = Concatenate(axis=-1)
    densor1 = Dense(10, activation="tanh")
    densor2 = Dense(1, activation="relu")
    activator = Activation(self.softmax, name='attention_weights')  # custom softmax(axis=1) loaded in this notebook
    dotor = Dot(axes=1)
    # Use repeator to repeat s_prev to be of shape (m, Tx, n_s) so that it can be concatenated with all hidden states "a".
    s_prev = repeator(s_prev)
    # Use concatenator to concatenate a and s_prev on the last axis.
    concat = concatenator([s_prev, a])
    # Use densor1 to propagate concat through a small fully connected network to compute the "intermediate energies" variable e.
    e = densor1(concat)
    # Use densor2 to propagate e through a small fully connected network to compute the "energies" variable.
    energies = densor2(e)
    # Subtract the time offset t0 from the energies before the softmax.
    energies = Subtract(name='data-time')([energies, t0])
    # Use "activator" on "energies" to compute the attention weights "alphas".
    alphas = activator(energies)
    # Use dotor together with "alphas" and "a" to compute the context vector given to the next (post-attention) layer.
    context = dotor([alphas, a])
    return context
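
Below is a minimal usage sketch, not part of the original paste: the Translator class, the softmax_axis1 helper, and the sizes Tx, n_a and n_s are assumptions chosen only so the method above can be called on Keras tensors.

import keras.backend as K
from keras.layers import Input

def softmax_axis1(x):
    # Softmax over the time axis (axis=1), matching the custom softmax the method expects in self.softmax.
    e = K.exp(x - K.max(x, axis=1, keepdims=True))
    return e / K.sum(e, axis=1, keepdims=True)

class Translator:
    # Reuse the function above as a method; it looks up self.softmax and the global Tx.
    one_step_attention = one_step_attention

    def __init__(self):
        self.softmax = softmax_axis1

Tx, n_a, n_s = 30, 32, 64                    # assumed input length and hidden-state sizes
translator = Translator()
a = Input(shape=(Tx, 2 * n_a))               # pre-attention Bi-LSTM hidden states for all Tx steps
s_prev = Input(shape=(n_s,))                 # previous post-attention LSTM state
t0 = Input(shape=(Tx, 1))                    # per-step offset subtracted from the energies
context = translator.one_step_attention(a, s_prev, t0)  # tensor of shape (m, 1, 2 * n_a)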