diff --git a/settings.py b/settings.py
index 8d1ffce..02bd246 100644
--- a/settings.py
+++ b/settings.py
@@ -1,48 +1,80 @@
+"""
+File containing the settings.
+"""
+# based on: https://github.com/Sentdex/Carla-RL/blob/master/settings.py
+
 # Carla environment settings
-CARLA_PATH = '../CARLA_0.9.6_Python_3.7'  # Path to Carla root folder
-CARLA_HOSTS_TYPE = 'local'  # 'local' or 'remote', 'local' means that script can start and restart Carla Simulator
+# CARLA_PATH = './carla_0.9.6'  # Path to Carla root folder
+CARLA_PATH = "./carla"  # Path to Carla root folder
+CARLA_HOSTS_TYPE = "remote"  # 'local' or 'remote', 'local' means that script can start and restart Carla Simulator
 CARLA_HOSTS_NO = 1
-CARLA_HOSTS = [['localhost', 2000, 10], ['localhost', 2002, 10]]  # List of hosts and ports and worlds to use, at least 2 ports of difference as Carla uses N and N+1 port, Town01 to Town97 for world currently, Town01 to Town07 for world are currently available, int number instead - random world change interval in minutes
+CARLA_HOSTS = [
+    ["localhost", 2000, 10],
+    ["localhost", 2002, 10],
+]  # List of hosts, ports and worlds to use, at least 2 ports of difference as Carla uses ports N and N+1, Town01 to Town07 are currently available for the world, an int number instead means a random world change interval in minutes
 SECONDS_PER_EPISODE = 10
 EPISODE_FPS = 60  # Desired
 IMG_WIDTH = 480
 IMG_HEIGHT = 270
 CAR_NPCS = 50
-RESET_CAR_NPC_EVERY_N_TICKS = 1  # Resets one car NPC every given number of ticks, tick is about a second
-ACTIONS = ['forward', 'forward_left', 'forward_right', 'brake', 'brake_left', 'brake_right']  # ['forward', 'left', 'right', 'forward_left', 'forward_right', 'backwards', 'backwards_left', 'backwards_right']
+RESET_CAR_NPC_EVERY_N_TICKS = (
+    1  # Resets one car NPC every given number of ticks, tick is about a second
+)
+ACTIONS = [
+    "forward",
+    "forward_left",
+    "forward_right",
+    "brake",
+    "brake_left",
+    "brake_right",
+]  # ['forward', 'left', 'right', 'forward_left', 'forward_right', 'backwards', 'backwards_left', 'backwards_right']
 WEIGHT_REWARDS_WITH_EPISODE_PROGRESS = False  # Linearly weights rewards from 0 to 1 with episode progress (from 0 up to SECONDS_PER_EPISODE)
-WEIGHT_REWARDS_WITH_SPEED = 'linear'  # 'discrete': -1 < 50kmh, 1 otherwise, 'linear': -1..1 with 0..100kmh, 'quadratic': -1..1 with 0..100kmh with formula: (speed / 100) ** 1.3 * 2 - 1
+WEIGHT_REWARDS_WITH_SPEED = "linear"  # 'discrete': -1 < 50kmh, 1 otherwise, 'linear': -1..1 with 0..100kmh, 'quadratic': -1..1 with 0..100kmh with formula: (speed / 100) ** 1.3 * 2 - 1
 SPEED_MIN_REWARD = -1
 SPEED_MAX_REWARD = 1
-PREVIEW_CAMERA_RES = [[640, 400, -5, 0, 2.5], [1280, 800, -5, 0, 2.5]]  # Available resolutions from "above the car" preview camera [width, height, x, y, z], where x, y and z are related to car position
-COLLISION_FILTER = [['static.sidewalk', -1], ['static.road', -1], ['vehicle.', 500]]  # list of pairs: agent id (can be part of the name) and impulse value allowed (-1 - disable collision detection entirely)
+PREVIEW_CAMERA_RES = [
+    [640, 400, -5, 0, 2.5],
+    [1280, 800, -5, 0, 2.5],
+]  # Available resolutions from "above the car" preview camera [width, height, x, y, z], where x, y and z are related to car position
+COLLISION_FILTER = [
+    ["static.sidewalk", -1],
+    ["static.road", -1],
+    ["vehicle.", 500],
+]  # list of pairs: agent id (can be part of the name) and impulse value allowed (-1 - disable collision detection entirely)

 # Agent settings
 AGENTS = 1
 AGENT_MEMORY_FRACTION = 0.1
-AGENT_GPU = None  # None, a number (to use given GPU for all agents) or a list - example [0, 1, 1] (first agent - GPU 0, 2nd and 3rd GPU 1)
-AGENT_CARLA_INSTANCE = []  # Empty list for first Carla instance or list in size of AGENTS with Carla instance bounds for agents, for excample [1, 1, 2, 2]
+AGENT_GPU = 0  # None, a number (to use given GPU for all agents) or a list - example [0, 1, 1] (first agent - GPU 0, 2nd and 3rd GPU 1)
+AGENT_CARLA_INSTANCE = (
+    []
+)  # Empty list for first Carla instance or a list of size AGENTS with Carla instance bounds for agents, for example [1, 1, 2, 2]
 UPDATE_WEIGHTS_EVERY = 0  # How frequently to update weights (compared to trainer fits), 0 for episode start only
 AGENT_SHOW_PREVIEW = []  # List of agent ids to show a preview for, or an empty list
 AGENT_SYNCED = True  # Synchronizes agent with frame updates from Carla
-AGENT_IMG_TYPE = 'grayscaled'  # 'rgb', 'grayscaled' or 'stacked' (stacks last 3 consecutive grayscaled frames)
-AGENT_ADDITIONAL_DATA = ['kmh']  # What additional data to include next to image data in observation space, possible values: kmh
+AGENT_IMG_TYPE = "grayscaled"  # 'rgb', 'grayscaled' or 'stacked' (stacks last 3 consecutive grayscaled frames)
+AGENT_ADDITIONAL_DATA = [
+    "kmh"
+]  # What additional data to include next to image data in observation space, possible values: kmh

 # Trainer settings
 MINIBATCH_SIZE = 16  # How many steps (samples) to use for training
 PREDICTION_BATCH_SIZE = 1  # How many samples to predict at once (the more, the faster)
-TRAINING_BATCH_SIZE = MINIBATCH_SIZE // 2  # How many samples to fit at once (the more, the faster) - should be MINIBATCH_SIZE divided by power of 2
+TRAINING_BATCH_SIZE = (
+    MINIBATCH_SIZE // 2
+)  # How many samples to fit at once (the more, the faster) - should be MINIBATCH_SIZE divided by power of 2
 UPDATE_TARGET_EVERY = 100  # Terminal states (end of episodes)
-MODEL_NAME = '5_residual_#CNN_KERNELS#'  # model name, prefixed from sources/models.py, #MODEL_ARCHITECTURE# adds model architectore acronym, #CNN_KERNELS# adds number of kernels from all CNN layers
+MODEL_NAME = "5_residual_#CNN_KERNELS#"  # model name, prefixed from sources/models.py, #MODEL_ARCHITECTURE# adds model architecture acronym, #CNN_KERNELS# adds number of kernels from all CNN layers
 MIN_REWARD = 100  # For model save
 TRAINER_MEMORY_FRACTION = 0.6
-TRAINER_GPU = None  # None - not set, 0, 1, ... - GPU with given index
+TRAINER_GPU = 1  # None - not set, 0, 1, ... - GPU with given index
 SAVE_CHECKPOINT_EVERY = 100  # episodes

 # DQN settings
 DISCOUNT = 0.99
 REPLAY_MEMORY_SIZE = 20_000  # How many last steps to keep for model training
-MIN_REPLAY_MEMORY_SIZE = 5_000  # Minimum number of steps in a memory to start training
+# ~ MIN_REPLAY_MEMORY_SIZE = 5_000  # Minimum number of steps in a memory to start training
+MIN_REPLAY_MEMORY_SIZE = 100  # Minimum number of steps in a memory to start training

 # Exploration settings
 START_EPSILON = 1
@@ -50,16 +82,16 @@ EPSILON_DECAY = 0.99995  # 0.99975
 MIN_EPSILON = 0.1

 # Model settings
-MODEL_BASE = '5_residual_CNN'  # from models.py
-MODEL_HEAD = 'hidden_dense'  # from models.py
-MODEL_SETTINGS = {'hidden_1_units': 256}  # 'hidden_1_units': 1024 for Xception
+MODEL_BASE = "5_residual_CNN"  # from models.py
+MODEL_HEAD = "hidden_dense"  # from models.py
+MODEL_SETTINGS = {"hidden_1_units": 256}  # 'hidden_1_units': 1024 for Xception

 # Optimizer settings
 OPTIMIZER_LEARNING_RATE = 0.001
 OPTIMIZER_DECAY = 0.0

 # Conv Cam
-CONV_CAM_LAYER = 'auto_act'  # 'auto' - finds and uses last activation layer, 'auto_act' - uses Activation layer after last convolution layer if exists
+CONV_CAM_LAYER = "auto_act"  # 'auto' - finds and uses last activation layer, 'auto_act' - uses Activation layer after last convolution layer if exists
 CONV_CAM_AGENTS = [1]

 # Console settings
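
The WEIGHT_REWARDS_WITH_SPEED comment above only describes the mapping in words; below is a minimal sketch of how the three modes could translate speed into a reward weight, assuming speed in km/h and the SPEED_MIN_REWARD/SPEED_MAX_REWARD bounds from this file. The helper is illustrative, not the repository's actual implementation.

# Hypothetical helper - not part of the repository, just illustrating the comment above.
def speed_reward_weight(speed_kmh, mode="linear"):
    if mode == "discrete":
        weight = -1 if speed_kmh < 50 else 1
    elif mode == "linear":
        weight = speed_kmh / 100 * 2 - 1  # 0..100 km/h maps to -1..1
    elif mode == "quadratic":
        weight = (speed_kmh / 100) ** 1.3 * 2 - 1  # formula from the comment
    else:
        raise ValueError(f"Unknown mode: {mode}")
    # Clamp to the configured bounds (SPEED_MIN_REWARD = -1, SPEED_MAX_REWARD = 1)
    return max(-1, min(1, weight))
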
diff --git a/sources/console.py b/sources/console.py
index 6e74566..6efbc63 100644
--- a/sources/console.py
+++ b/sources/console.py
@@ -186,7 +186,8 @@ class ConsoleStats:
             lines.append('')

             # Sets cursor back at the beginning of text block (moves it up)
-            string = '\r' + ('\033[A' * len(lines))
+            # ~ string = '\r' + ('\033[A' * len(lines))
+            string = ""

             # Add spaces to form a 70-char long line
             for line in lines:
@@ -197,7 +198,8 @@ class ConsoleStats:
             if self.stop.value == STOP.stopped:
                 return

-            time.sleep(0.2)
+            # ~ time.sleep(0.2)
+            time.sleep(3)

     @staticmethod
     def print_short(fps_counter, env, qs, action, action_name):
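
For context on the commented-out cursor line: '\033[A' is the ANSI "cursor up" escape, so '\r' + ('\033[A' * len(lines)) returned the cursor to the top of the just-printed stats block and let the next iteration overwrite it in place. A rough standalone sketch of that pattern (a hypothetical demo, not the project's code):

import time

# Print a small stats block, then move the cursor back up and redraw it in place.
stats = ["FPS: 60", "Reward: 0.42", "Epsilon: 0.10"]
for step in range(5):
    if step:
        print("\r" + "\033[A" * len(stats), end="")  # cursor back to top of the block
    print("\n".join(f"{line}  step={step}".ljust(70) for line in stats))
    time.sleep(0.2)
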
diff --git a/sources/trainer.py b/sources/trainer.py
index 366b4f9..479d9cc 100644
--- a/sources/trainer.py
+++ b/sources/trainer.py
@@ -96,6 +96,7 @@ class ARTDQNTrainer(ARTDQNAgent):

         # Start training only if certain number of transitions is already being saved in replay memory
         if len(self.replay_memory) < settings.MIN_REPLAY_MEMORY_SIZE:
+            print(f"too small replay_memory: {len(self.replay_memory)} vs {settings.MIN_REPLAY_MEMORY_SIZE}")
             return False

         # Get a minibatch of random samples from memory replay table
@@ -221,6 +222,7 @@ class ARTDQNTrainer(ARTDQNAgent):
             # If .train() call returns false, there's not enough transitions in replay memory
             # Just wait (and exit on 'stop' signal)
             if not self.train():
+                print("train returned false")
                 self.trainer_stats[0] = TRAINER_STATE.waiting

                 # Trainer is also a manager for stopping everything as it has to save a checkpoint
@@ -241,6 +243,7 @@ class ARTDQNTrainer(ARTDQNAgent):
             # Training part finished here, measure time and convert it to number of trains per second
             frame_time = time.time() - step_start
             self.tps_counter.append(frame_time)
+            print("training finished")
             self.trainer_stats[1] = len(self.tps_counter)/sum(self.tps_counter)

             # Shared flag set by models when they performed good to save a model
@@ -367,6 +370,8 @@ def run(model_path, logdir, stop, weights, weights_iteration, episode, epsilon,

     configured_actions = [getattr(ACTIONS, action) for action in settings.ACTIONS]

+    print("trainer helo")
+
     # Iterate over episodes until 'stop' signal
     while stop.value != 3:

@@ -375,15 +380,18 @@ def run(model_path, logdir, stop, weights, weights_iteration, episode, epsilon,
             trainer.tensorboard.step = episode.value

         # Load new transitions put here by models and place them in the memory replay table
+        # ~ print(f"num transitions: {transitions.qsize()}")
         for _ in range(transitions.qsize()):
             try:
                 trainer.update_replay_memory(transitions.get(True, 0.1))
             except:
                 break

+
         # Log stats in tensorboard
         while not tensorboard_stats.empty():

+            # ~ print("trainer-loop2")
             # Added to a Queue by agents
             agent_episode, reward, agent_epsilon, episode_time, frame_time, weighted_reward, *avg_predicted_qs = tensorboard_stats.get_nowait()
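
The transition-loading loop in the hunk above drains a multiprocessing queue by pairing qsize() with a short blocking get(). A self-contained sketch of that pattern with hypothetical names follows; note the original uses a bare except where queue.Empty is the specific exception, and that Queue.qsize() is not implemented on every platform (e.g. macOS).

import multiprocessing as mp
import queue
import time

def drain(transitions, replay_memory):
    # qsize() is only an estimate, so the timeout plus break guards against
    # racing with producers that are still writing (or have stopped).
    for _ in range(transitions.qsize()):
        try:
            replay_memory.append(transitions.get(True, 0.1))
        except queue.Empty:
            break

if __name__ == "__main__":
    q = mp.Queue()
    for i in range(5):
        q.put(("state", "action", "reward", "new_state", i))
    time.sleep(0.1)  # let the queue's feeder thread flush
    memory = []
    drain(q, memory)
    print(f"moved {len(memory)} transitions")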