Advertisement
Guest User

Untitled

a guest
Feb 21st, 2017
76
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 3.33 KB | None | 0 0
  1. #! python
  2.  
  3. import numpy as np
  4. import operator
  5.  
# Transition model T with T[i, j] = P(X_t = j | X_{t-1} = i) for a
# two-state HMM; state 0 / state 1 each persist with probability 0.7.
# Presumably the AIMA rain / no-rain "umbrella world" — TODO confirm.
dynamic_model = np.array([
    [0.7, 0.3],
    [0.3, 0.7]
])
# Sensor model as a diagonal matrix O: O[i, i] = P(evidence = True | state i).
# The off-diagonal zeros are by construction; P(evidence = False | state i)
# is obtained elsewhere as (I - O).
observation_model = np.array([
    [0.9, 0.0],
    [0.0, 0.2]
])
# Uniform prior over the two hidden states at t = 0.
initial_rain_probabilities = np.array(
    [0.5, 0.5]
)
# Observed evidence sequence, one boolean per time step.
evidences = [True, True, False, True, True]
  18.  
  19. def forward(state, evidence):
  20.     obsmodel = observation_model
  21.     if not evidence:
  22.         obsmodel = np.eye(len(obsmodel)) - obsmodel
  23.     result = np.dot(obsmodel, np.transpose(dynamic_model))
  24.     result = np.dot(result, state)
  25.     return result/result.sum()
  26.  
  27. def backward(probability, evidence):
  28.     obsmodel = observation_model
  29.     if not evidence:
  30.         obsmodel = np.eye(len(obsmodel)) - obsmodel
  31.     result = np.dot(dynamic_model, obsmodel)
  32.     result = np.dot(result, probability)
  33.     return result
  34.  
  35. def viterbi():
  36.     """Return the best path, given an HMM model and a sequence of observations"""
  37.     # A - initialise stuff
  38.     nSamples = len(evidences)
  39.     nStates = dynamic_model.shape[0] # number of states
  40.     c = np.zeros(nSamples) #scale factors (necessary to prevent underflow)
  41.     viterbi = np.zeros((nStates,nSamples)) # initialise viterbi table
  42.     psi = np.zeros((nStates,nSamples)) # initialise the best path table
  43.     best_path = np.zeros(nSamples); # this will be your output
  44.  
  45.     # B- appoint initial values for viterbi and best path (bp) tables - Eq (32a-32b)
  46.     viterbi[:,0] = initial_rain_probabilities.T * observation_model[:, evidences[0]]
  47.     c[0] = 1.0/np.sum(viterbi[:,0])
  48.     viterbi[:,0] = c[0] * viterbi[:,0] # apply the scaling factor
  49.     psi[0] = 0;
  50.  
  51.     # C- Do the iterations for viterbi and psi for time>0 until T
  52.     for t in range(1,nSamples): # loop through time
  53.         for s in range (0,nStates): # loop through the states @(t-1)
  54.             trans_p = viterbi[:,t-1] * dynamic_model[:,s]
  55.             psi[s,t], viterbi[s,t] = max(enumerate(trans_p), key=operator.itemgetter(1))
  56.             viterbi[s,t] = viterbi[s,t]*observation_model[s, evidences[t]]
  57.  
  58.         c[t] = 1.0/np.sum(viterbi[:,t]) # scaling factor
  59.         viterbi[:,t] = c[t] * viterbi[:,t]
  60.  
  61.     # D - Back-tracking
  62.     best_path[nSamples-1] =  viterbi[:,nSamples-1].argmax() # last state
  63.     for t in range(nSamples-1,0,-1): # states of (last-1)th to 0th time step
  64.         best_path[t-1] = psi[best_path[t],t]
  65.  
  66.     return best_path
  67.  
# NOTE(review): Python 2 print statements throughout — this script will not
# run under Python 3 as written.

# ---- Forward (filtering) pass --------------------------------------------
# forward_states[k] is the filtered distribution after k evidence steps;
# slot 0 holds the prior.  An object array is used so each slot can hold a
# probability vector.
print "Forward phase"
forward_states = np.array([None]*(len(evidences)+1))
forward_states[0] = initial_rain_probabilities
for evidence_no in range(0, len(evidences)):
    forward_states[evidence_no + 1] = forward(
        forward_states[evidence_no],
        evidences[evidence_no]
    )
    print evidence_no+1, forward_states[evidence_no + 1]

# ---- Backward pass + smoothing -------------------------------------------
# The backward message starts as all-ones (no future evidence); each
# iteration combines it with the matching forward message and renormalizes
# to produce the smoothed estimate.
print "\nBackward phase"
# NOTE(review): backward_states[0] is assigned here but overwritten by the
# final (evidence_no == 0) loop iteration below.
backward_states = np.array([None]*(len(evidences)+1))
backward_states[0] = initial_rain_probabilities
backward_probability = np.array([1.0, 1.0])
for evidence_no in range(len(evidences), -1, -1):
    # Smoothed estimate: forward message * backward message, renormalized.
    new_probability = forward_states[evidence_no]*backward_probability
    backward_states[evidence_no] = new_probability/new_probability.sum()
    # Prints the raw backward message (not the smoothed state) — appears
    # intentional given the "Backward phase" heading.
    print evidence_no, backward_probability
    # NOTE(review): when evidence_no == 0 this indexes evidences[-1] (the
    # last evidence); the resulting message is never used afterwards, but it
    # looks like an off-by-one worth confirming.
    backward_probability = backward(backward_probability, evidences[evidence_no-1])

# ---- Smoothed results ----------------------------------------------------
print "\nResults with smoothing"
for state_no in range(len(backward_states)):
    print state_no, backward_states[state_no]

# ---- Most likely state sequence ------------------------------------------
print "\nViterbi"
print viterbi()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement