# valueIterationAgents.py
# -----------------------
# Licensing Information:  You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).


import mdp, util

from learningAgents import ValueEstimationAgent

class ValueIterationAgent(ValueEstimationAgent):
    """
        * Please read learningAgents.py before reading this. *

        A ValueIterationAgent takes a Markov decision process
        (see mdp.py) on initialization and runs value iteration
        for a given number of iterations using the supplied
        discount factor.
    """
    def __init__(self, mdp, discount=0.9, iterations=100):
        """
          Your value iteration agent should take an mdp on
          construction, run the indicated number of iterations
          and then act according to the resulting policy.

          Some useful mdp methods you will use:
              mdp.getStates()
              mdp.getPossibleActions(state)
              mdp.getTransitionStatesAndProbs(state, action)
              mdp.getReward(state, action, nextState)
              mdp.isTerminal(state)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter()  # A Counter is a dict with default 0

        # Write value iteration code here
        "*** YOUR CODE HERE ***"
        # Batch value iteration: each sweep builds a fresh table of values
        # from the previous sweep's self.values via the Bellman update
        #   V_{k+1}(s) = max_a sum_{s'} T(s,a,s') * [R(s,a,s') + discount * V_k(s')]
        states = mdp.getStates()
        for _ in range(iterations):
            newValues = util.Counter()
            for state in states:
                bestAction = self.computeActionFromValues(state)
                # Terminal states have no legal actions and keep value 0.
                if bestAction is not None:
                    newValues[state] = self.computeQValueFromValues(state, bestAction)
            self.values = newValues

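    # Worked example of the update above (illustration only, not part of the
    # original file): with a single state s whose only action loops back to s
    # with reward 1 and discount 0.9, successive sweeps give
    #   V(s): 0, 1, 1.9, 2.71, ... -> 1 / (1 - 0.9) = 10.
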
    def getValue(self, state):
        """
          Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def computeQValueFromValues(self, state, action):
        """
          Compute the Q-value of action in state from the
          value function stored in self.values.
        """
        "*** YOUR CODE HERE ***"
        # Q(s, a) = sum_{s'} T(s,a,s') * [R(s,a,s') + discount * V(s')]
        qValue = 0
        for nextState, prob in self.mdp.getTransitionStatesAndProbs(state, action):
            reward = self.mdp.getReward(state, action, nextState)
            qValue += prob * (reward + self.discount * self.values[nextState])
        return qValue

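    # Worked example (illustration only): if T(s, a, .) yields
    # [(s1, 0.8), (s2, 0.2)] with rewards 10 and 0, and all stored values are
    # still 0, then Q(s, a) = 0.8 * (10 + 0) + 0.2 * (0 + 0) = 8.
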
    def computeActionFromValues(self, state):
        """
          The policy is the best action in the given state
          according to the values currently stored in self.values.

          You may break ties any way you see fit.  Note that if
          there are no legal actions, which is the case at the
          terminal state, you should return None.
        """
        "*** YOUR CODE HERE ***"
        possibleActions = self.mdp.getPossibleActions(state)
        # Terminal states (and any state with no legal actions) have no policy.
        if self.mdp.isTerminal(state) or not possibleActions:
            return None

        # Greedy policy extraction: keep the action with the highest Q-value,
        # breaking ties in favor of the earliest action in the list.
        bestAction = possibleActions[0]
        bestQValue = self.computeQValueFromValues(state, bestAction)
        for action in possibleActions[1:]:
            qValue = self.computeQValueFromValues(state, action)
            if qValue > bestQValue:
                bestQValue = qValue
                bestAction = action
        return bestAction

    def getPolicy(self, state):
        return self.computeActionFromValues(state)

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.computeActionFromValues(state)

    def getQValue(self, state, action):
        return self.computeQValueFromValues(state, action)
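
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): a minimal, self-contained
# illustration under the assumption that the agent only needs the five mdp
# methods listed in __init__'s docstring. The TwoStateMDP class below is
# hypothetical and stands in for the project's gridworld MDPs.
if __name__ == '__main__':
    class TwoStateMDP:
        """From 'A', the single action 'go' reaches the terminal state 'B'
        with probability 0.8 (reward 10) or stays in 'A' with probability
        0.2 (reward 0)."""
        def getStates(self):
            return ['A', 'B']
        def getPossibleActions(self, state):
            return [] if self.isTerminal(state) else ['go']
        def getTransitionStatesAndProbs(self, state, action):
            return [('B', 0.8), ('A', 0.2)]
        def getReward(self, state, action, nextState):
            return 10 if nextState == 'B' else 0
        def isTerminal(self, state):
            return state == 'B'

    agent = ValueIterationAgent(TwoStateMDP(), discount=0.9, iterations=100)
    print(agent.getValue('A'))   # converges toward 8 / (1 - 0.18) ~= 9.76
    print(agent.getPolicy('A'))  # 'go'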