import numpy as np
from gridworld import GridworldEnv

env = GridworldEnv()
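# Assumption: GridworldEnv is the 4x4 gridworld from the
# dennybritz/reinforcement-learning repo (Sutton & Barto, Example 4.1):
# terminal states in two opposite corners and a reward of -1 per step.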
def policy_eval(policy, env, discount_factor=1.0, theta=0.00001):
    """
    Evaluate a policy given an environment and a full description of the
    environment's dynamics.

    Args:
        policy: [S, A] shaped matrix representing the policy.
        env: OpenAI env. env.P represents the transition probabilities of the environment.
            env.P[s][a] is a list of transition tuples (prob, next_state, reward, done).
            env.nS is the number of states in the environment.
            env.nA is the number of actions in the environment.
        discount_factor: Gamma discount factor.
        theta: We stop evaluation once the value function changes by less than theta for all states.

    Returns:
        Vector of length env.nS representing the value function.
    """
    # Start with an all-zero value function
    V = np.zeros(env.nS)
    while True:
        # New value function for this sweep (synchronous backup)
        V_new = np.zeros(env.nS)
        # Largest value change over the state space (stopping condition)
        delta = 0
        # Loop over the state space
        for s in range(env.nS):
            # Accumulate the Bellman expectation backup for state s
            v_s = 0
            # Probability distribution over actions under the policy
            action_probs = policy[s]
            # Loop over possible actions
            for a in range(env.nA):
                # Loop over the transitions for this state-action pair
                for prob, next_state, reward, done in env.P[s][a]:
                    # Apply the Bellman expectation equation
                    v_s += action_probs[a] * prob * (reward + discount_factor * V[next_state])
            # Track the biggest difference over the state space
            delta = max(delta, abs(v_s - V[s]))
            # Store the backed-up value for state s
            V_new[s] = v_s
        # Swap in the new value function
        V = V_new
        # Converged once no state changed by more than theta
        if delta < theta:
            break
    return V
random_policy = np.ones([env.nS, env.nA]) / env.nA
v = policy_eval(random_policy, env)
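# Expected values of the equiprobable random policy on the 4x4 gridworld;
# these match the converged column of Sutton & Barto, Example 4.1.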
expected_v = np.array([0, -14, -20, -22, -14, -18, -20, -20, -20, -20, -18, -14, -22, -20, -14, 0])
np.testing.assert_array_almost_equal(v, expected_v, decimal=2)
print(v)
print(expected_v)
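# Optional sketch: view the value function laid out as the grid itself,
# assuming the standard 4x4 GridworldEnv exposes a `shape` attribute of (4, 4).
print(v.reshape(env.shape))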