Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import tkinter
- import cv2
- import PIL.Image, PIL.ImageTk
- import time
- import serial
- import time
- import threading
- import numpy as np
- import tensorflow as tf
- from tensorflow.python.platform import gfile
- from ssd_encoder_decoder.ssd_output_decoder import decode_detections
class App:
    """Tkinter GUI that streams a camera feed and drives a 3-axis stage
    (microscope-style X/Y/Z frame) over a serial G-code link.

    Offers jog buttons for each axis, homing, snapshots, a contrast-based
    autofocus sweep (auto_fuc) and an automatic serpentine raster scan (AUTO).
    Serial output requires fuction() to be pressed first (opens COM3).
    """

    def __init__(self, window, window_title, video_source=0):
        """Build the GUI, open the camera and start the periodic preview loop.

        Args:
            window: the tkinter root (or Toplevel) widget.
            window_title: text for the window title bar.
            video_source: OpenCV capture index or path (default 0).
        """
        self.window = window
        self.window.title(window_title)
        self.video_source = video_source
        # Current commanded stage position in G-code units, one per axis.
        self.state_x = 0
        self.state_y = 0
        self.state_z = 0
        # Camera + detector wrapper; opened once here (GUI could be threaded later).
        self.vid = MyVideoCapture(self.video_source)
        # Canvas sized to the expected 640x480 video frames.
        self.canvas = tkinter.Canvas(window, width=640, height=480)
        self.canvas.pack()
        # Control buttons.
        # BUGFIX: the autofocus button was mislabelled "HomeZ" (copy/paste);
        # it actually runs the focus sweep.
        self.auto_f = tkinter.Button(window, text="AutoFocus", width=50, command=self.auto_fuc)
        self.Home_z = tkinter.Button(window, text="HomeZ", width=50, command=self.HOME_Z)
        self.Home_x = tkinter.Button(window, text="HomeX", width=50, command=self.HOME_X)
        self.Home_y = tkinter.Button(window, text="HomeY", width=50, command=self.HOME_Y)
        self.btn_snapshot = tkinter.Button(window, text="Snapshot", width=50, command=self.snapshot)
        self.btn_connect = tkinter.Button(window, text="con", width=50, command=self.fuction)
        self.btn_NZ1 = tkinter.Button(window, text="negative z", width=50, command=self.move_NZ1)
        self.btn_PZ1 = tkinter.Button(window, text="positive z", width=50, command=self.move_PZ1)
        self.btn_NX1 = tkinter.Button(window, text="negative x", width=50, command=self.move_NX1)
        self.btn_PX1 = tkinter.Button(window, text="positive x", width=50, command=self.move_PX1)
        self.btn_NY1 = tkinter.Button(window, text="negative y", width=50, command=self.move_NY1)
        self.btn_PY1 = tkinter.Button(window, text="positive y", width=50, command=self.move_PY1)
        self.btn_AU = tkinter.Button(window, text="Auto", width=50, command=self.AUTO)
        # Pack all controls identically (same order as before).
        for btn in (self.auto_f, self.btn_snapshot, self.btn_connect,
                    self.btn_NZ1, self.btn_PZ1, self.btn_NX1, self.btn_PX1,
                    self.btn_NY1, self.btn_PY1, self.btn_AU,
                    self.Home_z, self.Home_x, self.Home_y):
            btn.pack(anchor=tkinter.CENTER, expand=True)
        # Refresh the preview every `delay` milliseconds via window.after().
        self.delay = 15
        self.update()
        self.window.mainloop()

    def _send_gcode(self, command):
        """Send one G-code line (newline appended) over the serial port.

        Requires fuction() to have been called first so `ser` is open.
        """
        ser.write(bytes(command + '\n', 'utf-8'))

    def auto_fuc(self):
        """Contrast-based autofocus: sweep Z over 0..24, score each frame by
        mean Canny edge intensity, then move to the sharpest Z position."""
        scores = []
        # BUGFIX: the original evaluated `self.Home_z` (the Button object --
        # a no-op) instead of actually homing Z before the sweep.
        self.HOME_Z()
        self.vid = MyVideoCapture(self.video_source)
        for z in range(25):
            self.state_z = z
            self._send_gcode('G1 Z' + str(self.state_z))
            time.sleep(1)  # let the stage settle before grading focus
            # BUGFIX: the original grabbed the frame *before* moving, so
            # score index i actually described position i-1.
            ret, frame = self.vid.get_frame()
            if ret:
                cv2.imwrite("frame-" + str(z) + ".jpg",
                            cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
                img = cv2.imread("frame-" + str(z) + ".jpg")
                edges = cv2.Canny(img, 100, 200)
                print(np.mean(edges))
                scores.append(np.mean(edges))
        # Move to the Z whose frame had the strongest edges (sharpest focus).
        print(scores.index(max(scores)))
        self.state_z = scores.index(max(scores))
        self._send_gcode('G1 Z' + str(self.state_z))

    def snapshot(self):
        """Save the current camera frame as a timestamped JPEG in the CWD."""
        ret, frame = self.vid.get_frame()
        if ret:
            # get_frame() yields RGB; OpenCV writes BGR, hence the conversion.
            cv2.imwrite("frame-" + time.strftime("%d-%m-%Y-%H-%M-%S") + ".jpg",
                        cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

    def update(self):
        """Draw the latest frame on the canvas and reschedule itself."""
        ret, frame = self.vid.get_frame()
        if ret:
            # Keep a reference on self so tkinter does not garbage-collect
            # the PhotoImage while it is displayed.
            self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
        self.window.after(self.delay, self.update)

    def fuction(self):
        """Open the serial connection to the motion controller on COM3."""
        global ser
        ser = serial.Serial(
            port='COM3',
            baudrate=115200,
            parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE,
            bytesize=serial.EIGHTBITS,
            timeout=0)

    def HOME_X(self):
        """Home the X axis and reset the tracked X position."""
        ser.write(b'G28 X\n')
        # BUGFIX: the original reset state_y here (copy/paste from HOME_Y),
        # leaving the X jog buttons out of sync after homing.
        self.state_x = 0

    def HOME_Y(self):
        """Home the Y axis and reset the tracked Y position."""
        ser.write(b'G28 Y\n')
        self.state_y = 0

    def HOME_Z(self):
        """Move Z back to 0 via G1 (no G28: Z appears to have no endstop)."""
        # BUGFIX: the original used a module-level `state_z` global, leaving
        # self.state_z stale for the jog buttons; track it on the instance.
        self.state_z = 0
        self._send_gcode('G1 Z' + str(self.state_z))

    def move_PX1(self):
        """Jog +1 unit in X."""
        self.state_x += 1
        self._send_gcode('G1 X' + str(self.state_x))

    def move_NX1(self):
        """Jog -1 unit in X."""
        self.state_x -= 1
        self._send_gcode('G1 X' + str(self.state_x))

    def move_PY1(self):
        """Jog +1 unit in Y."""
        self.state_y += 1
        self._send_gcode('G1 Y' + str(self.state_y))

    def move_NY1(self):
        """Jog -1 unit in Y."""
        self.state_y -= 1
        self._send_gcode('G1 Y' + str(self.state_y))

    def move_NZ1(self):
        """Jog -1 unit in Z, clamped at 0."""
        self.state_z = max(self.state_z - 1, 0)
        self._send_gcode('G1 Z' + str(self.state_z))

    def move_PZ1(self):
        """Jog +1 unit in Z, clamped at 25 (Z travel limit)."""
        self.state_z = min(self.state_z + 1, 25)
        self._send_gcode('G1 Z' + str(self.state_z))

    def AUTO(self):
        """Serpentine raster scan: home X/Y, then visit an 18x5 grid taking a
        snapshot at every position (even rows left-to-right, odd rows
        right-to-left)."""
        self.state_x = 0
        self.state_y = 0
        ser.write(b'G28 X\n')
        time.sleep(5)  # homing is slow; wait for it to finish
        ser.write(b'G28 Y\n')
        time.sleep(5)
        self.vid = MyVideoCapture(self.video_source)
        for y in range(5):
            self.state_y = y
            # BUGFIX: the original never commanded a Y move, so every row
            # was scanned at Y=0.
            self._send_gcode('G1 Y' + str(self.state_y))
            time.sleep(1)
            columns = range(18) if y % 2 == 0 else reversed(range(18))
            for x in columns:
                self.state_x = x
                ret, frame = self.vid.get_frame()
                print(str(self.state_x) + ' ' + str(self.state_y))
                # BUGFIX: on odd (right-to-left) rows the original sent
                # 'G1 X' + str(self.state_y), moving X to the row number.
                self._send_gcode('G1 X' + str(self.state_x))
                time.sleep(1)
                self.snapshot()
                # NOTE(review): manually calling update() here starts an
                # extra after()-chain per call; kept for preview refresh as
                # in the original -- consider window.update_idletasks().
                self.delay = 1
                self.update()
class MyVideoCapture:
    """Camera wrapper that runs each captured frame through a frozen SSD
    TensorFlow graph ("TensorRT_model.pb") and draws the detected
    parasite-egg bounding boxes and labels onto the frame.

    Raises:
        ValueError: if the video source cannot be opened.
    """

    def __init__(self, video_source=0):
        # Open the video source first so a bad camera index fails fast,
        # before the (slow) model load.
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened():
            raise ValueError("Unable to open video source", video_source)
        # Load the frozen inference graph.
        # BUGFIX: the original read from the bare name `f` (NameError) and
        # never released the handle on error; use a with-block instead.
        with gfile.FastGFile("TensorRT_model.pb", 'rb') as f:
            self.graph_def = tf.GraphDef()
            # Parse the serialized binary GraphDef message.
            self.graph_def.ParseFromString(f.read())
        # BUGFIX: reset_default_graph / import_graph_def live on the `tf`
        # module, not on self.
        tf.reset_default_graph()
        self.sess = tf.Session()
        self.sess.graph.as_default()
        # Import the serialized GraphDef into the current default Graph.
        tf.import_graph_def(self.graph_def)
        self.predict_tensor = self.sess.graph.get_tensor_by_name('import/predictions/concat:0')
        # Class index -> label for the SSD output (index 0 is background).
        self.classes = ["backgroud", "Ascarlis", "H.diminuta", "Large Egg", "Tenia"]
        # Native capture dimensions, as reported by the driver.
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)

    def get_frame(self):
        """Grab one frame, run detection and draw boxes on it.

        Returns:
            (True, frame) with boxes/labels drawn on success, otherwise
            (False/ret, None).  NOTE(review): the frame is returned exactly
            as OpenCV captured it (BGR channel order); the GUI treats it as
            RGB -- confirm whether a BGR2RGB conversion is intended.
        """
        if not self.vid.isOpened():
            return (False, None)
        ret, frame = self.vid.read()
        if not ret:
            return (ret, None)
        # BUGFIX: the original mixed `frame`/`frames` and used bare
        # `sess`/`predict_tensor`/`classes` (NameErrors), and returned the
        # 4-D batch instead of the annotated 2-D frame (which would break
        # PIL.Image.fromarray in the GUI).
        batch = frame[np.newaxis]  # add batch dimension: (1, H, W, 3)
        y_pred = self.sess.run(self.predict_tensor, {'import/input_1_1:0': batch})
        y_pred_decoded = decode_detections(y_pred,
                                           confidence_thresh=0.75,
                                           iou_threshold=0.5,
                                           top_k=200,
                                           normalize_coords=True,
                                           img_height=480,
                                           img_width=640)
        # Each decoded box is [class_id, confidence, ..., xmin, ymin, xmax, ymax].
        for box in y_pred_decoded[0]:
            xmin = int(box[-4])
            ymin = int(box[-3])
            xmax = int(box[-2])
            ymax = int(box[-1])
            label = '{}: {:.2f}'.format(self.classes[int(box[0])], box[1])
            cv2.putText(frame, label, (xmin, ymin - 4),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 1,
                        cv2.LINE_AA)
            cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 0), 2)
        return (ret, frame)

    def __del__(self):
        """Release the video source when the object is destroyed."""
        # Guard with getattr: __del__ runs even if __init__ raised before
        # self.vid was assigned.
        vid = getattr(self, 'vid', None)
        if vid is not None and vid.isOpened():
            vid.release()
# Guard the entry point so importing this module does not open the camera
# or block in tkinter's mainloop.
if __name__ == "__main__":
    # Create a window and pass it to the Application object (blocks here).
    App(tkinter.Tk(), "Tkinter and OpenCV")
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement