import numpy as np
import cv2
import depthai as dai
import time
SHAPE = 300  # NN input resolution: frames are resized to SHAPE x SHAPE before inference
def to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray:
    """Resize *arr* to *shape* and flatten it into planar (CHW) order.

    Fix: the original annotated the return type as ``list``, but
    ``flatten()`` returns a 1-D ``np.ndarray``.

    Parameters
    ----------
    arr : np.ndarray
        Input image; assumed HWC with 3 channels (transpose(2, 0, 1)
        requires a 3-D array) — TODO confirm callers only pass BGR frames.
    shape : tuple
        Target (width, height) passed to ``cv2.resize``.

    Returns
    -------
    np.ndarray
        Flattened planar pixel data, dtype unchanged from the input.
    """
    return cv2.resize(arr, shape).transpose(2, 0, 1).flatten()
# Build the DepthAI pipeline: host frames in -> NN -> results (and the
# passthrough of the consumed frame) back out to the host.
p = dai.Pipeline()
p.setOpenVINOVersion(dai.OpenVINO.VERSION_2021_4)

# Host -> device input stream for raw frames.
xin = p.create(dai.node.XLinkIn)
# Fix: was the magic number 300*300*3, duplicating SHAPE; one uint8 BGR
# frame of SHAPE x SHAPE pixels.
xin.setMaxDataSize(SHAPE*SHAPE*3)
xin.setNumFrames(30)
xin.setStreamName('xin')

# NN that runs edge detection on the frame (blob name and the display
# window title both indicate a Laplacian edge filter — the original
# comment claimed face detection).
nn = p.create(dai.node.NeuralNetwork)
nn.setBlobPath("edge_simplified_openvino_2021.4_6shave.blob")
xin.out.link(nn.input)

# Send the NN output to the host via XLink.
nn_xout = p.create(dai.node.XLinkOut)
nn_xout.setStreamName("nn")
nn.out.link(nn_xout.input)

# Also forward the exact frame the NN consumed, so the host can match
# sequence numbers / timestamps against the results.
pass_xout = p.create(dai.node.XLinkOut)
pass_xout.input.setQueueSize(2)
pass_xout.input.setBlocking(False)
pass_xout.setStreamName("pass")
nn.passthrough.link(pass_xout.input)
# Pipeline is defined, now we can connect to the device
with dai.Device(p) as device:
    # Host-side queues: NN results, frame input, and the passthrough of the
    # exact frame each result was computed from.
    qNn = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    qin = device.getInputQueue("xin")
    qPass = device.getOutputQueue(name="pass", maxSize=2, blocking=False)

    shape = (3, SHAPE, SHAPE)  # planar CHW layout of the NN output

    def get_frame(imf_frame, shape):
        """Decode the NN's FP16 planar output buffer into an HWC uint8 image."""
        return np.array(imf_frame.getData()).view(np.float16).reshape(shape).transpose(1, 2, 0).astype(np.uint8)

    cap = cv2.VideoCapture("chinese_traffic.mp4")
    i = 0
    try:
        while cap.isOpened():
            i += 1
            ok, frame = cap.read()
            if not ok:  # end of video or read failure
                break
            tstamp = time.monotonic()
            # Package the frame as NNData and ship it to the device.
            nn_data = dai.NNData()
            nn_data.setLayer("input", to_planar(frame, (SHAPE, SHAPE)))
            nn_data.setTimestamp(tstamp)
            nn_data.setSequenceNum(i)
            qin.send(nn_data)
            # qNn.get() blocks until the NN result for this frame arrives.
            cv2.imshow("Laplacian edge detection", get_frame(qNn.get(), shape))
            cv2.imshow("Color", frame)
            # Passthrough lets us verify sequence number / timestamp pairing.
            nn_pass = qPass.get()
            print(nn_pass.getSequenceNum(), nn_pass.getTimestamp())
            if cv2.waitKey(1) == ord('q'):
                break
    finally:
        # Fix: the original never released the capture or closed the
        # OpenCV windows, leaking the video handle on exit.
        cap.release()
        cv2.destroyAllWindows()