#!/usr/bin/env python2
from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib

matplotlib.use('Agg')
import os
import time
import numpy as np
from numpy import inf, random
import matplotlib.pyplot as plt
import pickle
import json
import robobo
import cv2
import sys
import signal
from pprint import pprint
import prey

import collections

use_simulation = True
run_test = False
speed = 20 if use_simulation else 30
dist = 500 if use_simulation else 400
rewards = [0]
fitness = list()


def terminate_program(signal_number, frame):
    print("Ctrl-C received, terminating program")
    sys.exit(1)


def main():
    signal.signal(signal.SIGINT, terminate_program)

    rob = robobo.SimulationRobobo().connect(address='10.15.3.49', port=19997) if use_simulation \
        else robobo.HardwareRobobo(camera=True).connect(address="10.15.3.49")
    rob.set_phone_tilt(45, 100)

    def get_sensor_info(direction):
        a = np.log(np.array(rob.read_irs())) / 10
        all_sensor_info = np.array([0 if x == inf else 1 + (-x / 2) - 0.2 for x in a]) if use_simulation \
            else np.array(np.log(rob.read_irs())) / 10
        all_sensor_info[all_sensor_info == inf] = 0
        all_sensor_info[all_sensor_info == -inf] = 0
        # [0, 1, 2, 3, 4, 5, 6, 7]
        if direction == 'front':
            return all_sensor_info[5]
        elif direction == 'back':
            return all_sensor_info[1]
        elif direction == 'front_left':
            return all_sensor_info[6]
        elif direction == 'front_left_left':
            return all_sensor_info[7]
        elif direction == 'front_right':
            return all_sensor_info[4]
        elif direction == 'front_right_right':
            return all_sensor_info[3]
        elif direction == 'back_left':
            return all_sensor_info[0]
        elif direction == 'back_right':
            return all_sensor_info[2]
        elif direction == 'all':
            print(all_sensor_info[3:])
            return all_sensor_info
        elif direction == 'front_3':
            return [all_sensor_info[3]] + [all_sensor_info[5]] + [all_sensor_info[7]]
        else:
            raise Exception('Invalid direction')

    # Sensor readings are discretized into safe, almost safe, and not safe, and combined with the
    # previous state (safe / almost safe / not safe): safe to almost safe is good, almost safe to
    # safe is okay, and safe to safe is neutral. Transitions follow s -> a -> r -> s'.
    # Small steps for going left or right (left and right only rotate; straight drives forward).
    # The controller is the set of Q-values: the boundary for every sensor.
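    # For illustration: a discretized state concatenates the three 'front_3' IR levels and the
    # three colour levels, e.g. [0, 1, 2, 0, 1, 2], with every entry in {0, 1, 2}
    # (see make_discrete below).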

    def move_left():
        rob.move(-speed, speed, dist)

    def move_right():
        rob.move(speed, -speed, dist)

    def go_straight():
        rob.move(speed, speed, dist)

    def move_back():
        rob.move(-speed, -speed, dist)

    boundary_sensor = [0.6, 0.8] if not use_simulation else [0.5, 0.95]
    boundaries_color = [0.1, 0.7] if not use_simulation else [0.05, 0.85]

    # A static collision-avoidance policy
    def static_policy(color_info):
        max_c = np.max(color_info)
        if max_c == color_info[0]:
            return 1
        elif max_c == color_info[1]:
            return 0
        elif max_c == color_info[2]:
            return 2
        return 0
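    # static_policy returns an action index (0: straight, 1: left, 2: right), steering toward the
    # image third with the highest value; color_info is assumed to be the (left, middle, right)
    # green ratios returned by get_color_info below.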


    # NOTE: this outer epsilon_policy is shadowed by the copy defined inside rl() below; the
    # state_table it references only exists inside rl().
    def epsilon_policy(s, epsilon):
        s = str(s)
        # epsilon greedy
        """
        ACTIONS ARE DEFINED AS FOLLOWS:
          NUM: ACTION
          ------------
          0: STRAIGHT
          1: LEFT
          2: RIGHT
          ------------
        """
        e = 0 if run_test else epsilon
        if e > random.random():
            return random.choice([0, 1, 2])
        else:
            return np.argmax(state_table[s])

    def take_action(action):
        if action == 1:
            move_left()
        elif action == 2:
            move_right()
        elif action == 0:
            go_straight()
        # elif action == 'back':
        #     move_back()

    def get_color_info():
        image = rob.get_image_front()

        # Mask function
        def get_green_pixels(img):
            count = 0
            pix_count = 0
            b = 64
            for i in range(len(img)):
                for j in range(len(img[i])):
                    pixel = img[i][j]
                    pix_count += 1
                    if (pixel[0] > b or pixel[2] > b) and pixel[1] < b * 2 \
                            or (pixel[0] > b and pixel[1] > b and pixel[2] > b):
                        # img[i][j] = [0, 0, 0]
                        count += 1
            return 1 - (count / pix_count)

        left, middle_l, middle_r, right = np.hsplit(image, 4)
        middle = np.concatenate((middle_l, middle_r), axis=1)
        return get_green_pixels(left), get_green_pixels(middle), get_green_pixels(right)
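    # get_green_pixels counts pixels whose channel values mark them as non-green and returns
    # 1 - (non-green / total), i.e. the fraction of green-looking pixels; the returned tuple is
    # ordered (left, middle, right), with the two centre quarters merged into "middle".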

    def get_reward(previous_state, new_state,
                   previous_sensor, new_sensor,
                   prev_action, action,
                   prev_val, new_val,
                   prev_food, new_food, no_blocks):

        # Max no. of green in img, before and after
        # 0: No green pixels in img;    1: All img consists of green pixels

        prev_right, prev_mid, prev_left = prev_val
        sum_prev_val = sum(prev_val)
        new_right, new_mid, new_left = new_val
        sum_new_val = sum(new_val)
        max_new_sensor = np.max(new_sensor)
        max_prev_sensor = np.max(previous_sensor)
        max_c_prev = np.max(previous_state[3:])
        max_c_new = np.max(new_state[3:])

        # TODO Check which condition to keep
        if max_c_new == 2:
            print("30")
            no_blocks += 1
            return 30

        if sum_prev_val < sum_new_val:
            return 10

        if max_new_sensor > boundary_sensor[1]:
            return -100

        if (action == 1 and prev_action == 2) or (action == 2 and prev_action == 1):
            return -5

        return 1 if action == 0 else -1
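    # Reward summary: +30 when any discretized colour value reaches 2 (a large amount of green in
    # view), +10 when the total green fraction increased, -100 when an IR reading exceeds the upper
    # sensor boundary, -5 for oscillating left/right, otherwise +1 for straight and -1 for turning.
    # Note: the no_blocks += 1 above only changes the local parameter, not the caller's counter.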

    # Returns list of values with discretized sensor values and color values.
    def make_discrete(values_s, boundary_s, values_c, boundaries_c):
        discrete_list_s = []
        discrete_list_c = []

        for x in values_s:
            if boundary_s[0] > x:
                discrete_list_s.append(0)
            elif boundary_s[1] > x > boundary_s[0]:
                discrete_list_s.append(1)
            else:
                discrete_list_s.append(2)
        for y in values_c:
            if y < boundaries_c[0]:
                discrete_list_c.append(0)
            elif boundaries_c[0] < y < boundaries_c[1]:
                discrete_list_c.append(1)
            else:
                discrete_list_c.append(2)
        print('real c_values: ', values_c)
        return discrete_list_s + discrete_list_c
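    # Example with the simulation boundaries ([0.5, 0.95] for sensors, [0.05, 0.85] for colour):
    # make_discrete([0.2, 0.6, 0.97], [0.5, 0.95], (0.02, 0.5, 0.9), [0.05, 0.85]) -> [0, 1, 2, 0, 1, 2]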

    """
    REINFORCEMENT LEARNING PROCESS
    INPUT:  alpha    : learning rate
            gamma    : discount factor
            epsilon  : epsilon value for e-greedy
            episodes : no. of episodes
            act_lim  : no. of actions robot takes before ending episode
            qL       : True if you use Q-Learning
    """
    stat_fitness = list()
    stat_rewards = [0]

    def normalize(reward, old_min, old_max, new_min=-1, new_max=1):
        return ((reward - old_min) / (old_max - old_min)) * (new_max - new_min) + new_min
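    # normalize linearly rescales [old_min, old_max] to [new_min, new_max]; with the values used
    # below, normalize(r, -30, 20) maps r = -30 -> -1 and r = 20 -> 1, so e.g. the +30 block reward
    # comes out as 1.4 (rewards outside [-30, 20] fall outside [-1, 1]).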

    # Static-controller baseline; it assumes a state_table dict exists in the enclosing scope
    # (in this script one is only created inside rl() below) and is never called from main().
    def run_static(lim, no_blocks=0):
        prev_action = 0
        for i in range(lim):
            if use_simulation:
                rob.play_simulation()

            a, b, c = get_color_info()
            current_color_info = a, b, c
            current_sensor_info = get_sensor_info('front_3')

            current_state = make_discrete(current_sensor_info, boundary_sensor, current_color_info,
                                          boundaries_color)

            if str(current_state) not in state_table.keys():
                state_table[str(current_state)] = [0 for _ in range(3)]

            a, b, c = get_color_info()
            new_color_info = a, b, c
            # print(a, b, c, new_color_info)

            action = static_policy(new_color_info)

            take_action(action)

            new_sensor_info = get_sensor_info('front_3')
            new_state = make_discrete(new_sensor_info, boundary_sensor, new_color_info,
                                      boundaries_color)
            # TODO: make sure that current color info gets initialized the first time.
            # get_reward expects the full 11-argument signature; the food counters are passed as 0
            # here because this static baseline does not track collected food.
            r = get_reward(current_state, new_state,
                           current_sensor_info, new_sensor_info,
                           prev_action, action,
                           current_color_info, new_color_info,
                           0, 0, no_blocks)
            if r == 30:  # block reward (get_reward returns 30 when a block fills the view)
                no_blocks += 1

            norm_r = normalize(r, -30, 20)

            if i != 0:
                stat_fitness.append(stat_fitness[-1] + (no_blocks / i))
            else:
                stat_fitness.append(float(0))
            print(fitness)
            if stat_rewards:
                stat_rewards.append(stat_rewards[-1] + norm_r)
            else:
                rewards.append(norm_r)

            current_state = new_state
            current_color_info = new_color_info
            prev_action = action

    def rl(alpha, gamma, epsilon, episodes, act_lim, param_tuples, qL=False, no_blocks=0):
        def epsilon_policy(s, epsilon):
            s = str(s)
            # epsilon greedy
            """
            ACTIONS ARE DEFINED AS FOLLOWS:
              NUM: ACTION
              ------------
              0: STRAIGHT
              1: LEFT
              2: RIGHT
              ------------
            """
            e = 0 if run_test else epsilon
            if e > random.random():
                return random.choice([0, 1, 2])
            else:
                return np.argmax(state_table[s])

        state_table = {}
        # If we are running a single hyperparameter setting (usually the full training run with the
        # chosen, most promising parameters) we just use state_table.json; otherwise each
        # epsilon/gamma combination gets its own state-table file.
        q_table_file = './src/state_table.json' if len(param_tuples) == 1 \
            else './src/state_table__epsilon' + ''.join(str(epsilon).split('.')) + \
                 '_gamma' + ''.join(str(gamma).split('.')) + '.json'
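        # e.g. epsilon=0.22, gamma=0.9 -> './src/state_table__epsilon022_gamma09.json'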
        if os.path.exists(q_table_file):
            with open(q_table_file) as g:
                state_table = json.load(g)

        fitness = list()
        rewards = [0]

        for i in range(episodes):
            print('Episode ' + str(i))
            terminate = False
            if use_simulation:
                rob.play_simulation()

            current_color_space = get_color_info()
            current_sensor_info = get_sensor_info('front_3')
            current_state = make_discrete(current_sensor_info, boundary_sensor, current_color_space,
                                          boundaries_color)

            if str(current_state) not in state_table.keys():
                state_table[str(current_state)] = [0 for _ in range(3)]

            action = epsilon_policy(current_state, epsilon)
            current_collected_food = rob.collected_food() if use_simulation else 0
            # initialise state if it doesn't exist, else retrieve the current q-value
            x = 0
            while not terminate:

                take_action(action)
                new_collected_food = rob.collected_food() if use_simulation else 0

                # Whole img extracted to get reward value
                # left, mid, right extracted to save state space accordingly

                new_color_space = get_color_info()
                new_sensor_info = get_sensor_info('front_3')
                new_state = make_discrete(new_sensor_info, boundary_sensor, new_color_space,
                                          boundaries_color)

                if str(new_state) not in state_table.keys():
                    state_table[str(new_state)] = [0 for _ in range(3)]

                new_action = epsilon_policy(new_state, epsilon)

                # Retrieve the max action if we use Q-Learning
                max_action = np.argmax(state_table[str(new_state)]) if qL else new_action

                # Get reward
                r = get_reward(current_state, new_state,
                               current_sensor_info, new_sensor_info,
                               action, new_action,
                               current_color_space, new_color_space,
                               current_collected_food, new_collected_food, no_blocks)
                print("State and obtained Reward: ", new_state, r)

                norm_r = normalize(r, -30, 20)

                if i != 0:
                    fitness.append(no_blocks / i)
                else:
                    fitness.append(float(0))
                # print(fitness)
                if rewards:
                    rewards.append(rewards[-1] + norm_r)
                else:
                    rewards.append(norm_r)

                # Update rule
                if not run_test:
                    # print('update')
                    state_table[str(current_state)][action] += \
                        alpha * (r + gamma * state_table[str(new_state)][max_action]
                                 - state_table[str(current_state)][action])

                # Stop episode if we get very close to an obstacle
                if (max(new_state[:3]) == 2 and max(new_state[3:]) != 2 and use_simulation) \
                        or x == act_lim - 1:
                    state_table[str(new_state)][new_action] = -10
                    terminate = True
                    print("done")
                    if not run_test:
                        print('writing json')
                        with open(q_table_file, 'w') as json_file:
                            json.dump(state_table, json_file)

                    if use_simulation:
                        print("stopping the simulation")
                        rob.stop_world()
                        while not rob.is_sim_stopped():
                            print("waiting for the simulation to stop")
                        time.sleep(2)

                # update current state and action
                current_state = new_state
                current_sensor_info = new_sensor_info
                action = new_action
                current_color_space = new_color_space
                current_collected_food = new_collected_food
                # img = new_img

                # increment action limit counter
                x += 1

        return fitness, rewards

    experiments = 2
    eps = 1  # number of episodes for Task 2
    timesteps = 2

    epsilons = [0.01, 0.08, 0.22]
    gammas = [0.9]
    param_tuples = [(epsilon, gamma) for epsilon in epsilons for gamma in gammas]
    # IMPORTANT: when running only one hyperparameter setting (usually the full training run with
    # the chosen, most promising parameters), put epsilon and gamma here as [(epsilon, gamma)].
    param_tuples = [(0.011, 0.9), (0.99, 0.9)]

    # RL_fitnesses = {key: np.zeros(eps) for key in param_tuples}
    for epsilon, gamma in param_tuples:

        for run in range(experiments):
            print('======= RUNNING FOR epsilon ' + str(epsilon) + ' and gamma ' + str(gamma),
                  ' , this is run ' + str(run))
            fitness, rewards = rl(0.9, gamma, epsilon, eps, timesteps, param_tuples,
                                  qL=True)  # alpha, gamma, epsilon, episodes, actions per episode

            # RL_fitnesses[(epsilon, gamma)][0] = fitness

            # if not run_test:
            # if os.path.exists('./src/rewards.csv'):
            #     with open('./src/rewards.csv') as f:
            #         all_rewards = pickle.load(f)
            #
            # if os.path.exists('./src/fitness.csv'):
            #     with open('./src/fitness.csv') as f:
            #         all_fits = pickle.load(f)
            file_name_rewards = './src/rewards_epsilon' + str(epsilon) + '_run' + str(run) + '.csv'
            with open(file_name_rewards, 'wb') as f:
                pickle.dump(rewards, f)

            file_name_fitness = './src/fitness_epsilon' + str(epsilon) + '_run' + str(run) + '.csv'
            with open(file_name_fitness, 'wb') as f:
                pickle.dump(fitness, f)
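            # To load a saved run back later (sketch; file names follow the pattern used above):
            #     with open('./src/rewards_epsilon0.011_run0.csv', 'rb') as f:
            #         saved_rewards = pickle.load(f)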

        # with open('./src/stat_rewards.csv', 'w') as f:
        #     pickle.dump(stat_rewards, f)
        #

        # with open('./src/stat_fitness.csv', 'w') as f:
        #     pickle.dump(stat_fitness, f)
        #

        # plt.figure('Fitness Values')
    #     plt.plot(RL_fitnesses[(epsilon, gamma)], label=str(epsilon))
    #     # plt.plot(stat_fitness, label='Static Controller')
    # plt.legend()
    # plt.savefig("./src/plotyay_fitness.png")
    # plt.show()
    #
    #     plt.figure('Rewards')
    #     plt.plot(all_rewards, label='Q-Learning Controller')
    #     # plt.plot(stat_rewards, label='Static Controller')
    #     plt.legend()
    #     plt.savefig("./src/plot_reward.png")
    #     plt.show()


def image_test():
    signal.signal(signal.SIGINT, terminate_program)
    rob = robobo.SimulationRobobo().connect(address='10.15.3.49', port=19997) if use_simulation \
        else robobo.HardwareRobobo(camera=True).connect(address="172.20.10.11")
    if use_simulation:
        rob.play_simulation()
    # rob.set_phone_tilt(109, 90)
    image = rob.get_image_front()

    rob.move(50, -50, 1000)

    def get_green_pixels(img):
        count = 0
        pix_count = 0
        b = 64
        for i in range(len(img)):
            for j in range(len(img[i])):
                pixel = img[i][j]
                pix_count += 1
                if (pixel[0] > b or pixel[2] > b) and pixel[1] < b * 2 \
                        or (pixel[0] > b and pixel[1] > b and pixel[2] > b):
                    img[i][j] = [0, 0, 0]
                    count += 1
        return 1 - (count / pix_count)

        # rob.set_phone_tilt(109, 100)
        # image = rob.get_image_front()
        # print(image)

    left, middle_l, middle_r, right = np.hsplit(image, 4)
    middle = np.concatenate((middle_l, middle_r), axis=1)

    x = rob.collected_food() if use_simulation else 0
    pprint(x)
    # print([get_green_pixels(left), get_green_pixels(middle), get_green_pixels(right)])
    print(np.average(get_green_pixels(image)))

    if use_simulation:
        print("stopping the simulation")
        rob.stop_world()
        while not rob.is_sim_stopped():
            print("waiting for the simulation to stop")
        time.sleep(2)


if __name__ == "__main__":
    main()
    # image_test()