from tensorflow.keras.layers import (RepeatVector, Concatenate, Dense,
                                     Activation, Dot, Subtract)

def one_step_attention(self, a, s_prev, t0):
    # Tx (the number of input time steps) is expected to be defined in the enclosing scope.
    repeator = RepeatVector(Tx)
    concatenator = Concatenate(axis=-1)
    densor1 = Dense(10, activation="tanh")
    densor2 = Dense(1, activation="relu")
    activator = Activation(self.softmax, name='attention_weights')  # custom softmax(axis=1) loaded in this notebook
    dotor = Dot(axes=1)
    # Use repeator to repeat s_prev to shape (m, Tx, n_s) so it can be concatenated with all hidden states "a".
    s_prev = repeator(s_prev)
    # Use concatenator to concatenate a and s_prev on the last axis.
    concat = concatenator([s_prev, a])
    # Use densor1 to propagate concat through a small fully connected network to compute the "intermediate energies" e.
    e = densor1(concat)
    # Use densor2 to propagate e through a second fully connected network to compute the "energies".
    energies = densor2(e)
    # Subtract the time offsets t0 from the energies before normalising them.
    energies = Subtract(name='data-time')([energies, t0])
    # Apply "activator" to the adjusted energies to compute the attention weights "alphas".
    alphas = activator(energies)
    # Use dotor with "alphas" and "a" to compute the context vector passed to the next (post-attention) layer.
    context = dotor([alphas, a])
    return context
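
A minimal usage sketch for the attention step above. The sizes Tx, n_a, n_s, the softmax helper, and the AttentionDemo wrapper are illustrative assumptions, not part of the original paste; they only show the expected tensor shapes.

import tensorflow as tf
from tensorflow.keras.layers import Input

Tx, n_a, n_s = 30, 32, 64                 # assumed: input length, encoder units, decoder units

def softmax(x, axis=1):
    # Softmax normalised over the time axis, standing in for the custom
    # softmax referenced as self.softmax above.
    return tf.nn.softmax(x, axis=axis)

class AttentionDemo:
    softmax = staticmethod(softmax)
    one_step_attention = one_step_attention   # reuse the method defined above

a = Input(shape=(Tx, 2 * n_a))             # pre-attention Bi-LSTM hidden states
s_prev = Input(shape=(n_s,))               # previous post-attention LSTM state
t0 = Input(shape=(Tx, 1))                  # per-step offsets subtracted from the energies

context = AttentionDemo().one_step_attention(a, s_prev, t0)
print(context.shape)                       # (None, 1, 2*n_a)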