from __future__ import division
import autograd.numpy as np
from autograd.scipy.special import logsumexp  # moved here from autograd.scipy.misc in newer autograd releases
from autograd import grad
from functools import partial
def EM(init_params, data):
    def EM_update(params):
        natural_params = list(map(np.log, params))
        # E step: the gradient of log Z w.r.t. the natural (log-space)
        # parameters is the vector of expected sufficient statistics
        expected_stats = grad(log_Z)(natural_params, data)
        # M step: normalize the expected counts into valid distributions
        return list(map(normalize, expected_stats))

    def fixed_point(f, x0):
        x1 = f(x0)
        while different(x0, x1):
            x0, x1 = x1, f(x1)
        return x1

    def different(params1, params2):
        return not all(map(np.allclose, params1, params2))

    return fixed_point(EM_update, init_params)
def normalize(a):
    def replace_zeros(a):
        return np.where(a > 0., a, 1.)  # avoid dividing by zero on all-zero rows
    return a / replace_zeros(a.sum(-1, keepdims=True))
def log_Z(natural_params, data):
    # log partition function of the HMM, computed with the forward
    # algorithm in log space; takes log-space (natural) parameters so that
    # grad(log_Z) yields expected sufficient statistics in the E step
    if isinstance(data, list):
        return sum(map(partial(log_Z, natural_params), data))  # independent sequences
    log_pi0, log_A, log_B = natural_params
    log_alpha = log_pi0
    for y_t in data:
        log_alpha = logsumexp(log_alpha[:, None] + log_A, axis=0) + log_B[:, y_t]
    return logsumexp(log_alpha)
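
# --- Usage sketch (illustrative addition, not part of the original paste) ---
# A minimal demo on synthetic data, assuming a 2-state HMM over 3 discrete
# output symbols; the names pi0, A, B and the sequences below are hypothetical.
if __name__ == '__main__':
    npr = np.random.RandomState(0)

    num_states, num_outputs = 2, 3
    pi0 = normalize(npr.rand(num_states))             # initial state distribution
    A = normalize(npr.rand(num_states, num_states))   # transition matrix, rows sum to 1
    B = normalize(npr.rand(num_states, num_outputs))  # emission matrix, rows sum to 1

    # two independent observation sequences with symbols in {0, 1, 2}
    data = [np.array([0, 1, 2, 1, 0]), np.array([2, 2, 1, 0])]

    pi0, A, B = EM((pi0, A, B), data)
    print(pi0)
    print(A)
    print(B)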