Untitled

a guest
Oct 9th, 2015
from __future__ import division
import autograd.numpy as np
from autograd.scipy.special import logsumexp  # autograd.scipy.misc in older autograd releases
from autograd import grad
from functools import partial

def EM(init_params, data):
    # EM for a discrete HMM. The gradient of the log partition function
    # with respect to the natural (log) parameters equals the expected
    # sufficient statistics, so autodiff performs the E step for us.
    def EM_update(params):
        natural_params = list(map(np.log, params))
        expected_stats = grad(log_Z)(natural_params, data)  # E step
        return list(map(normalize, expected_stats))         # M step

    def fixed_point(f, x0):
        x1 = f(x0)
        while different(x0, x1):
            x0, x1 = x1, f(x1)
        return x1

    def different(params1, params2):
        return not all(map(np.allclose, params1, params2))

    return fixed_point(EM_update, init_params)

def normalize(a):
    # Normalize along the last axis, leaving all-zero rows untouched.
    def replace_zeros(a):
        return np.where(a > 0., a, 1.)
    return a / replace_zeros(a.sum(-1, keepdims=True))

def log_Z(natural_params, data):
    # Log partition function of the HMM, computed with the forward
    # recursion in log space. A list of sequences contributes the sum
    # of its per-sequence log partition functions.
    if isinstance(data, list):
        return sum(map(partial(log_Z, natural_params), data))

    log_pi0, log_A, log_B = natural_params
    log_alpha = log_pi0
    for y_t in data:
        log_alpha = logsumexp(log_alpha[:,None] + log_A, axis=0) + log_B[:,y_t]
    return logsumexp(log_alpha)
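
A minimal usage sketch (not part of the original paste): the state/symbol counts and the toy observation sequence below are assumptions for illustration. init_params is a list of row-stochastic arrays (initial distribution pi0, transition matrix A, emission matrix B), and data is a sequence of integer observation symbols.

import numpy.random as npr

npr.seed(0)
K, V = 3, 5  # hypothetical sizes: K hidden states, V observation symbols

init_params = [normalize(npr.rand(K)),     # pi0: initial state distribution
               normalize(npr.rand(K, K)),  # A:   transitions, A[i,j] = p(j | i)
               normalize(npr.rand(K, V))]  # B:   emissions,   B[i,v] = p(v | i)

data = np.array([0, 1, 1, 4, 2, 3, 0, 0, 1])  # toy observation sequence
pi0, A, B = EM(init_params, data)

Note that different uses np.allclose's default tolerances, so the stopping rule is strict; passing looser atol/rtol values makes the fixed-point loop terminate sooner.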