# Welcome to the object detection tutorial!

# # Imports
import multiprocessing
from multiprocessing import Pipe
import time
import cv2
import mss
import numpy as np
import os
import sys
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # use the first GPU
import tensorflow as tf
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
import pyautogui

# pyautogui settings: remove the built-in pauses so mouse movements happen instantly
pyautogui.MINIMUM_DURATION = 0
pyautogui.MINIMUM_SLEEP = 0
pyautogui.PAUSE = 0

# title of our window
title = "FPS benchmark"
# set start time to current time
start_time = time.time()
# display the frame rate every 2 seconds
display_time = 2
# set the initial FPS counter to 0
fps = 0
# create the mss screen-capture object as sct
sct = mss.mss()
# set the monitor region size for mss to capture
width = 800
height = 600

monitor = {"top": 80, "left": 0, "width": width, "height": height}

# ## Env setup
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# # Model preparation
PATH_TO_FROZEN_GRAPH = 'CSGO_frozen_inference_graph.pb'
# List of the strings that are used to add the correct label to each box.
PATH_TO_LABELS = 'CSGO_labelmap.pbtxt'
NUM_CLASSES = 4

# ## Loading the label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

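# For reference: category_index is a plain dict keyed by class id, each entry
# holding that class's id and display name from CSGO_labelmap.pbtxt, roughly
# {1: {'id': 1, 'name': ...}, 2: {'id': 2, 'name': ...}, ...}. A detected class
# id can therefore be turned into a readable label with, for example:
#     label_name = category_index[int(class_id)]['name']
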
# ## Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

def Shoot(mid_x, mid_y):
    x = int(mid_x*width)
    #y = int(mid_y*height)             # aim at the detected midpoint as-is
    y = int(mid_y*height + height/9)   # shift down to roughly compensate for the capture region's 80 px top offset
    pyautogui.moveTo(x, y)
    pyautogui.click()

def grab_screen(p_input):
    while True:
        # Grab the screen image; mss frames are BGRA, so convert to RGB for the model
        img = np.array(sct.grab(monitor))
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)

        # Put the image into the pipe
        p_input.send(img)

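# The functions here form a small pipeline: grab_screen() above produces frames,
# TensorflowDetection() below consumes them and produces annotated frames, and
# Show_image() displays those. Each runs in its own process (see __main__), and
# the two multiprocessing.Pipe() pairs created there connect them: p_input/p_output
# carry raw frames, p_input2/p_output2 carry annotated frames.
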
def TensorflowDetection(p_output, p_input2):
    # Detection
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            while True:
                # Get the image from the pipe
                image_np = p_output.recv()
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Handles to the graph's input and output tensors
                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                scores = detection_graph.get_tensor_by_name('detection_scores:0')
                classes = detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=2)

                # Send the annotated image to pipe2
                p_input2.send(image_np)

                # Collect detection midpoints per class
                array_ch = []
                array_c = []
                array_th = []
                array_t = []
                for i, b in enumerate(boxes[0]):
                    if classes[0][i] == 2:  # ch
                        if scores[0][i] >= 0.5:
                            mid_x = (boxes[0][i][1] + boxes[0][i][3]) / 2
                            mid_y = (boxes[0][i][0] + boxes[0][i][2]) / 2
                            array_ch.append([mid_x, mid_y])
                            cv2.circle(image_np, (int(mid_x*width), int(mid_y*height)), 3, (0, 0, 255), -1)
                    if classes[0][i] == 1:  # c
                        if scores[0][i] >= 0.5:
                            mid_x = (boxes[0][i][1] + boxes[0][i][3]) / 2
                            # aim one sixth of the way down the box rather than at its centre
                            mid_y = boxes[0][i][0] + (boxes[0][i][2] - boxes[0][i][0]) / 6
                            array_c.append([mid_x, mid_y])
                            cv2.circle(image_np, (int(mid_x*width), int(mid_y*height)), 3, (50, 150, 255), -1)
                    if classes[0][i] == 4:  # th
                        if scores[0][i] >= 0.5:
                            mid_x = (boxes[0][i][1] + boxes[0][i][3]) / 2
                            mid_y = (boxes[0][i][0] + boxes[0][i][2]) / 2
                            array_th.append([mid_x, mid_y])
                            cv2.circle(image_np, (int(mid_x*width), int(mid_y*height)), 3, (0, 0, 255), -1)
                    if classes[0][i] == 3:  # t
                        if scores[0][i] >= 0.5:
                            mid_x = (boxes[0][i][1] + boxes[0][i][3]) / 2
                            mid_y = boxes[0][i][0] + (boxes[0][i][2] - boxes[0][i][0]) / 6
                            array_t.append([mid_x, mid_y])
                            cv2.circle(image_np, (int(mid_x*width), int(mid_y*height)), 3, (50, 150, 255), -1)

                team = "c"  # which class group to shoot at
                if team == "c":
                    if len(array_ch) > 0:
                        Shoot(array_ch[0][0], array_ch[0][1])
                    if len(array_ch) == 0 and len(array_c) > 0:
                        Shoot(array_c[0][0], array_c[0][1])
                if team == "t":
                    if len(array_th) > 0:
                        Shoot(array_th[0][0], array_th[0][1])
                    if len(array_th) == 0 and len(array_t) > 0:
                        Shoot(array_t[0][0], array_t[0][1])

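# Note on the loop above: 'detection_boxes' come back normalized to [0, 1] in
# [ymin, xmin, ymax, xmax] order, which is why mid_x averages indices 1 and 3,
# mid_y uses indices 0 and 2, and both are scaled by width/height before
# drawing or shooting.
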
def Show_image(p_output2):
    global start_time, fps
    while True:
        image_np = p_output2.recv()
        # Show the image with detections
        cv2.imshow(title, image_np)
        # Below we calculate our FPS
        fps += 1
        TIME = time.time() - start_time
        if TIME >= display_time:
            print("FPS: ", fps / TIME)
            fps = 0
            start_time = time.time()
        # Press "q" to quit
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break

if __name__ == "__main__":
    # Pipes
    p_output, p_input = Pipe()
    p_output2, p_input2 = Pipe()

    # creating new processes
    p1 = multiprocessing.Process(target=grab_screen, args=(p_input,))
    p2 = multiprocessing.Process(target=TensorflowDetection, args=(p_output, p_input2,))
    p3 = multiprocessing.Process(target=Show_image, args=(p_output2,))

    # starting our processes
    p1.start()
    p2.start()
    p3.start()
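    # Optional tidy-up (a sketch, left commented out): wait for the preview
    # window to be closed with "q", then stop the capture and detection
    # workers, which otherwise loop forever.
    #     p3.join()
    #     p1.terminate()
    #     p2.terminate()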