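# Tkinter + OpenCV control panel for a camera riding on a G-code driven XYZ
# stage (most likely a motorised microscope stage, given the autofocus routine
# and the parasite-egg class names below). The GUI shows a live, SSD-annotated
# video feed and can jog, home, autofocus, and raster-scan the stage over a
# serial link.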
import tkinter
import cv2
import PIL.Image, PIL.ImageTk
import time
import serial
import threading
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from ssd_encoder_decoder.ssd_output_decoder import decode_detections

class App:
    def __init__(self, window, window_title, video_source=0):
        self.window = window
        self.state_z = 0
        self.state_y = 0
        self.state_x = 0
        self.window.title(window_title)
        self.video_source = video_source

        # Open the video source; the capture could later be moved to its own
        # thread to decouple it from the GUI.
        self.vid = MyVideoCapture(self.video_source)

        # Create a canvas that can fit the above video source size
        self.canvas = tkinter.Canvas(window, width=640, height=480)
        self.canvas.pack()

        # Buttons for autofocus, homing, snapshot, serial connect, jogging, and the auto scan
        self.auto_f = tkinter.Button(window, text="Auto Focus", width=50, command=self.auto_fuc)
        self.Home_z = tkinter.Button(window, text="HomeZ", width=50, command=self.HOME_Z)
        self.Home_x = tkinter.Button(window, text="HomeX", width=50, command=self.HOME_X)
        self.Home_y = tkinter.Button(window, text="HomeY", width=50, command=self.HOME_Y)
        self.btn_snapshot = tkinter.Button(window, text="Snapshot", width=50, command=self.snapshot)
        self.btn_connect = tkinter.Button(window, text="Connect", width=50, command=self.fuction)
        self.btn_NZ1 = tkinter.Button(window, text="negative z", width=50, command=self.move_NZ1)
        self.btn_PZ1 = tkinter.Button(window, text="positive z", width=50, command=self.move_PZ1)
        self.btn_NX1 = tkinter.Button(window, text="negative x", width=50, command=self.move_NX1)
        self.btn_PX1 = tkinter.Button(window, text="positive x", width=50, command=self.move_PX1)
        self.btn_NY1 = tkinter.Button(window, text="negative y", width=50, command=self.move_NY1)
        self.btn_PY1 = tkinter.Button(window, text="positive y", width=50, command=self.move_PY1)
        self.btn_AU = tkinter.Button(window, text="Auto", width=50, command=self.AUTO)
        self.auto_f.pack(anchor=tkinter.CENTER, expand=True)
        self.btn_snapshot.pack(anchor=tkinter.CENTER, expand=True)
        self.btn_connect.pack(anchor=tkinter.CENTER, expand=True)
        self.btn_NZ1.pack(anchor=tkinter.CENTER, expand=True)
        self.btn_PZ1.pack(anchor=tkinter.CENTER, expand=True)
        self.btn_NX1.pack(anchor=tkinter.CENTER, expand=True)
        self.btn_PX1.pack(anchor=tkinter.CENTER, expand=True)
        self.btn_NY1.pack(anchor=tkinter.CENTER, expand=True)
        self.btn_PY1.pack(anchor=tkinter.CENTER, expand=True)
        self.btn_AU.pack(anchor=tkinter.CENTER, expand=True)
        self.Home_z.pack(anchor=tkinter.CENTER, expand=True)
        self.Home_x.pack(anchor=tkinter.CENTER, expand=True)
        self.Home_y.pack(anchor=tkinter.CENTER, expand=True)

        # After it is called once, the update method will be automatically called every delay milliseconds
        self.delay = 15
        self.update()

        self.window.mainloop()
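
    # Autofocus: sweep the Z axis from 0 to 24, grab a frame at each height,
    # and score sharpness as the mean of a Canny edge map; the stage is then
    # moved back to the Z position with the highest score.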
    def auto_fuc(self):
        pic = []
        self.HOME_Z()  # start the sweep from Z = 0
        self.vid = MyVideoCapture(self.video_source)
        for i in range(25):
            self.state_z = i
            # Move to the new Z height and give the stage time to settle
            gcode = 'G1 Z' + str(self.state_z) + '\n'
            ser.write(bytes(gcode, 'utf-8'))
            time.sleep(1)
            ret, frame = self.vid.get_frame()
            if ret:
                cv2.imwrite("frame-" + str(i) + ".jpg", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
                img = cv2.imread("frame-" + str(i) + ".jpg")
                edges = cv2.Canny(img, 100, 200)
                print(np.mean(edges))
                pic.append(np.mean(edges))
        # Return to the sharpest Z position found during the sweep
        print(pic.index(max(pic)))
        self.state_z = pic.index(max(pic))
        gcode = 'G1 Z' + str(self.state_z) + '\n'
        ser.write(bytes(gcode, 'utf-8'))

    def snapshot(self):
        # Get a frame from the video source
        ret, frame = self.vid.get_frame()
        if ret:
            cv2.imwrite("frame-" + time.strftime("%d-%m-%Y-%H-%M-%S") + ".jpg", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

    def update(self):
        # Get a frame from the video source
        ret, frame = self.vid.get_frame()

        if ret:
            self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)

        self.window.after(self.delay, self.update)
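
    # Open the serial link to the motion controller. The controller is assumed
    # to be a G-code interpreter (e.g. a 3D-printer style board) on Windows
    # port COM3; adjust the port name for other machines.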
    def fuction(self):
        global ser
        ser = serial.Serial(
            port='COM3',
            baudrate=115200,
            parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE,
            bytesize=serial.EIGHTBITS,
            timeout=0)
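
    # G28 homes the named axis; the cached state_* coordinate is reset to 0 so
    # that subsequent G1 jog moves start from the home position.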
    def HOME_X(self):
        ser.write(b'G28 X\n')
        self.state_x = 0

    def HOME_Y(self):
        ser.write(b'G28 Y\n')
        self.state_y = 0

    def move_PX1(self):
        self.state_x = self.state_x + 1
        gcode = 'G1 X' + str(self.state_x) + '\n'
        ser.write(bytes(gcode, 'utf-8'))

    def move_NX1(self):
        self.state_x = self.state_x - 1
        gcode = 'G1 X' + str(self.state_x) + '\n'
        ser.write(bytes(gcode, 'utf-8'))

    def move_PY1(self):
        self.state_y = self.state_y + 1
        gcode = 'G1 Y' + str(self.state_y) + '\n'
        ser.write(bytes(gcode, 'utf-8'))

    def move_NY1(self):
        self.state_y = self.state_y - 1
        gcode = 'G1 Y' + str(self.state_y) + '\n'
        ser.write(bytes(gcode, 'utf-8'))

    def move_NZ1(self):
        # Jog Z down by 1, clamped at 0
        if self.state_z > 0:
            self.state_z = self.state_z - 1
        else:
            self.state_z = 0
        gcode = 'G1 Z' + str(self.state_z) + '\n'
        ser.write(bytes(gcode, 'utf-8'))

    def move_PZ1(self):
        # Jog Z up by 1, clamped at 25
        if self.state_z < 25:
            self.state_z = self.state_z + 1
        else:
            self.state_z = 25
        gcode = 'G1 Z' + str(self.state_z) + '\n'
        ser.write(bytes(gcode, 'utf-8'))

    def HOME_Z(self):
        # Z is driven back to 0 with a plain G1 move rather than G28
        self.state_z = 0
        gcode = 'G1 Z' + str(self.state_z) + '\n'
        ser.write(bytes(gcode, 'utf-8'))
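
    # Auto scan: home X and Y, then raster the stage over an 18 x 5 grid in a
    # serpentine pattern, pausing at every position to save an annotated
    # snapshot and refresh the preview.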
    def AUTO(self):
        self.state_y = 0
        self.state_x = 0
        ser.write(b'G28 X\n')
        time.sleep(5)
        ser.write(b'G28 Y\n')
        time.sleep(5)
        self.vid = MyVideoCapture(self.video_source)
        for self.state_y in range(5):
            # Move to the current row before sweeping across it
            gcode = 'G1 Y' + str(self.state_y) + '\n'
            ser.write(bytes(gcode, 'utf-8'))
            if (self.state_y % 2) == 0:
                # Even rows: sweep X left to right
                for self.state_x in range(18):
                    ret, frame = self.vid.get_frame()
                    print(str(self.state_x) + ' ' + str(self.state_y))
                    gcode = 'G1 X' + str(self.state_x) + '\n'
                    ser.write(bytes(gcode, 'utf-8'))
                    time.sleep(1)
                    self.snapshot()
                    self.delay = 1
                    self.update()
            else:
                # Odd rows: sweep X right to left
                for self.state_x in reversed(range(18)):
                    ret, frame = self.vid.get_frame()
                    print(str(self.state_x) + ' ' + str(self.state_y))
                    gcode = 'G1 X' + str(self.state_x) + '\n'
                    ser.write(bytes(gcode, 'utf-8'))
                    time.sleep(1)
                    self.snapshot()
                    self.delay = 1
                    self.update()


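# Video source wrapper: opens the camera plus a frozen (TensorRT-optimised,
# judging by the file name) SSD detection graph. decode_detections appears to
# come from the ssd_keras SSD implementation and turns the raw network output
# into [class_id, confidence, xmin, ymin, xmax, ymax] boxes.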
class MyVideoCapture:
    def __init__(self, video_source=0):
        # Open the video source
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened():
            raise ValueError("Unable to open video source", video_source)

        # Load the frozen detection graph and create a TF1 session for it
        self.f = gfile.FastGFile("TensorRT_model.pb", 'rb')
        self.graph_def = tf.GraphDef()
        # Parses a serialized binary message into the current message.
        self.graph_def.ParseFromString(self.f.read())
        self.f.close()
        tf.reset_default_graph()
        self.sess = tf.Session()
        self.sess.graph.as_default()
        # Import a serialized TensorFlow `GraphDef` protocol buffer
        # and place it into the current default `Graph`.
        tf.import_graph_def(self.graph_def)
        self.predict_tensor = self.sess.graph.get_tensor_by_name('import/predictions/concat:0')
        self.classes = ["background", "Ascaris", "H.diminuta", "Large Egg", "Taenia"]

        # Get video source width and height
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
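
    # Grab one frame, run the SSD graph on it, and draw the decoded detections
    # (label, confidence, and bounding box) directly onto the frame. The tensor
    # names 'import/input_1_1:0' and 'import/predictions/concat:0' are whatever
    # the frozen graph was exported with.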
    def get_frame(self):
        if self.vid.isOpened():
            ret, frame = self.vid.read()

            if ret:
                # Add a batch dimension for the network input
                frames = frame[np.newaxis]
                y_pred = self.sess.run(self.predict_tensor, {'import/input_1_1:0': frames})
                y_pred_decoded = decode_detections(y_pred,
                                                   confidence_thresh=0.75,
                                                   iou_threshold=0.5,
                                                   top_k=200,
                                                   normalize_coords=True,
                                                   img_height=480,
                                                   img_width=640)
                for box in y_pred_decoded[0]:
                    xmin = int(box[-4])
                    ymin = int(box[-3])
                    xmax = int(box[-2])
                    ymax = int(box[-1])
                    label = '{}: {:.2f}'.format(self.classes[int(box[0])], box[1])
                    cv2.putText(frame, label, (xmin, ymin - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 1,
                                cv2.LINE_AA)
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 0), 2)
                # Return a boolean success flag and the annotated frame converted to RGB
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                return (ret, None)
        else:
            return (False, None)

    # Release the video source when the object is destroyed
    def __del__(self):
        if self.vid.isOpened():
            self.vid.release()


# Create a window and pass it to the Application object
App(tkinter.Tk(), "Tkinter and OpenCV")