Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- /*
- For this environment simulator we will represent the environment
- in a dictionary of key/value pairs. This isn't the most efficient,
- but it's easy to implement.
- */
- % hashtable manipulation utilities
- % get the value for a key
- % get_attr(key,list,value).
% get_attr(+Attr, +List, -Value)
% Look up Attr in a list of [Key,Value] pairs. Yields [] when the key
% is absent (callers rely on this for "no model yet").
% Fixed: the original left a choice point after finding the key, so on
% backtracking it would keep scanning and eventually return [] from the
% base clause — a spurious extra solution. The cut commits to the first
% match; anonymous variables silence singleton warnings.
get_attr(_Attr, [], []).
get_attr(Attr, [[Attr, Value] | _Rest], Value) :- !.
get_attr(Attr, [_Pair | Rest], Value) :-
    get_attr(Attr, Rest, Value).
- % set the value for a key
- % set_attr(key,value,list,newlist).
% set_attr(+Attr, +Value, +List, -NewList)
% Replace the value stored under Attr, or append the pair if the key is
% new.
% Fixed: the original's pass-through clause also matched when the head
% pair's key WAS Attr, so on backtracking it produced a second solution
% with a duplicate key appended at the end. The cut commits to the
% in-place replacement; _Old silences the singleton warning.
set_attr(Attr, Value, [], [[Attr, Value]]).
set_attr(Attr, Value, [[Attr, _Old] | Rest], [[Attr, Value] | Rest]) :- !.
set_attr(Attr, Value, [[Key, Val] | Rest], [[Key, Val] | NewRest]) :-
    set_attr(Attr, Value, Rest, NewRest).
- % display the world
% write_state(+State)
% Print every [Key,Value] pair of the state as [Key,Value], with no
% separators and no trailing newline (callers add nl themselves).
write_state([]).
write_state([[Key, Value] | Rest]) :-
    format('[~w,~w]', [Key, Value]),
    write_state(Rest).
- /* Here we use some abstract rules that rely on agent specific rules.
- this allows us to keep the system generic, and promotes code reuse */
- % useful prototypes
- % change the state by applying an action rule to it.
- actuate(Action,State,NewState) :- action(Action,State,NewState).
- % example action rule. Called by actuate.
- action(end,State,NewState) :- fail.
% agent_loop(+Agents, +State, -NewState, +Models, -NewModels)
% Run one simulation step for every agent in the list, threading both
% the world state and the per-agent model store through the recursion.
agent_loop([],State,State,Models,Models).
agent_loop([A|Agents],State,NewState,Models,NewModels) :-
    sensor(A,State,Percept),                         % build this agent's percept
    get_attr(A,Models,Model),                        % fetch stored model ([] when none yet)
    agent_program(A,Percept,Action,Model,NewModel),  % agent picks an Action
    write(A),write(' does '),write(Action),nl,
    actuate(Action,State,MidState),                  % apply the action to the world
    write(' After state:'),write_state(MidState),nl,
    set_attr(A,NewModel,Models,MidModels),           % persist the updated model
    agent_loop(Agents,MidState,NewState,MidModels,NewModels).
- % main loop
- % start: load the initial state, and execute the first step.
- start :- initial_state(X),initial_models(M),step(X,M).
- % display the state, build the percept, get the agent's action
- % actuate that action, display the results
- % step on the new state.
% step(+State, +Models)
% One interactive simulation step: prompt the user (read/1 expects a
% term ending in '.', so the user must type "y."), print the state, let
% the environment mutate, run every agent, then recurse on the result.
% NOTE(review): the leading ! is a no-op (first goal of the only
% clause, nothing to cut); the prompt says [Y|N] but only lowercase y
% continues — confirm intended.
step(State,Models) :- !,write('Step? [Y|N] '),read(X),X='y',nl,
    write('Before state: '),write_state(State),nl,
    exogenous_action(State,EState),      % environment hook (identity in this domain)
    agents(A),agent_loop(A,EState,MidState,Models,NewModels),
    !,step(MidState,NewModels).          % cut, then tail-recurse with new state/models
% NB: step/2 is recursive, and uses tail-call recursion to pass the
% state and models between calls. This is a common Prolog technique.
% The execution is effectively iterative.
- % vacuum world initial state
- % this is where the code gets specific to this domain.
location(a). % declares the two locations explicitly; not otherwise
             % consulted, but documents the domain.
location(b).
% connect/2 defines the spatial layout: connect(X,Y) means X and Y are
% adjacent, with X on the left of Y.
connect(a,b).
% Initial state as a [Key,Value] list:
% the vacuum is at location a; a is dirty, b is clean.
initial_state([[location,a],[a,dirty],[b,clean]]).
% Agents start with no stored model.
initial_models([]).
% The world contains a single agent, the simple reflex agent 'sra'.
agents([sra]).
% Hook for spontaneous environment changes; identity here — nothing in
% Vacuum World mutates on its own.
exogenous_action(State,State).
- % global actions - these are the things we can do in Vacuum World
- % these define how an action performed by the vacuum impacts the state.
- % clean: change the location's state to clean.
% Domain actions for Vacuum World, invoked through actuate/3.
% Each clause maps (ActionName, State) to the resulting NewState.

% clean: mark the vacuum's current location as clean.
action(clean, State, NewState) :-
    get_attr(location, State, Here),
    set_attr(Here, clean, State, NewState).

% move_left: succeed when some There satisfies connect(There, Here),
% i.e. there is a location to our left; relocate the vacuum there.
action(move_left, State, NewState) :-
    get_attr(location, State, Here),
    connect(There, Here),
    set_attr(location, There, State, NewState).

% move_right: mirror image of move_left.
action(move_right, State, NewState) :-
    get_attr(location, State, Here),
    connect(Here, There),
    set_attr(location, There, State, NewState).
- % vacuum perceptions
- % the vacuum's sensor lets it know which location it's in
- % and whether the current location is dirty or clean.
- % it cannot tell the cleanliness of other locations.
- % This is the Sensor for ALL agents in Vacuum World.
- % In a more advanced setting, we could have different sensors for each
- % agent.
% sensor(+Agent, +State, -Percept)
% The shared sensor for all agents in Vacuum World: the percept reports
% the agent's location, whether that location is clean or dirty, and
% which moves are possible. It cannot see other locations' cleanliness.
% Fixed the singleton warning: the agent argument is unused here (one
% sensor for everyone), so it is now an underscored anonymous variable.
sensor(_Agent, State, [[location,L],[L,CoD],[moves,Moves]]) :-
    get_attr(location, State, L),
    get_attr(L, State, CoD),
    possible_moves(State, Moves).
% possible_moves(+State, -Moves)
% Moves is [[left,Bool],[right,Bool]] for the agent's current location,
% where each Bool is true/false as decided by can_go/3.
possible_moves(State,[[left, LP],[right, RP]]) :-
    get_attr(location,State,L), can_go(left,L,LP), can_go(right,L,RP).
% can_go(+Dir, +Loc, -Result)
% Result is true when a move in direction Dir is possible from Loc
% (per connect/2), false otherwise.
% Fixed: the original's catch-all clause re-succeeded with false on
% backtracking even after a true answer, so can_go/3 could yield BOTH
% true and false for the same query. The cuts commit to the positive
% answer; anonymous variables replace the singletons.
can_go(left,  Loc, true) :- connect(_, Loc), !.
can_go(right, Loc, true) :- connect(Loc, _), !.
can_go(_, _, false).
- % finally, the agent program that implements my agent function.
- % this example is a simple reflex agent.
- % if my current location is dirty, clean it
- %
- % the simple reflex agent
% agent_program(+Agent, +Percept, -Action, +Model, -NewModel)
% The simple reflex agent: clauses are tried in order, so earlier rules
% take priority. The model is unused — it is ignored and reset to [].
% Rule 1: if the current location is dirty, clean it.
agent_program(sra,Percept,Action,_,[]) :-
    write('Percept:'),nl,              % debug output: show what the agent sees
    write_state(Percept),nl,
    get_attr(location,Percept,L),
    get_attr(L,Percept,dirty),Action = clean.
% Rule 2: otherwise, if a right move is possible, go right.
agent_program(sra,Percept,Action,_,[]) :- get_attr(moves,Percept,M),
    get_attr(right,M,true), Action = move_right.
% Rule 3: otherwise, if a left move is possible, go left.
agent_program(sra,Percept,Action,_,[]) :- get_attr(moves,Percept,M),
    get_attr(left,M,true), Action = move_left.
Add Comment
Please, Sign In to add comment