import numpy as np
import cv2
import depthai as dai
import time

SHAPE = 300

def to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray:
    # Resize to the NN input size and flatten into planar (CHW) byte order
    return cv2.resize(arr, shape).transpose(2, 0, 1).flatten()


p = dai.Pipeline()
p.setOpenVINOVersion(dai.OpenVINO.VERSION_2021_4)

# XLinkIn node - receives the frames sent from the host
xin = p.create(dai.node.XLinkIn)
xin.setMaxDataSize(300*300*3)
xin.setNumFrames(30)
xin.setStreamName('xin')

# NN that runs Laplacian edge detection on the image
nn = p.create(dai.node.NeuralNetwork)
nn.setBlobPath("edge_simplified_openvino_2021.4_6shave.blob")
xin.out.link(nn.input)

# Send the edge-detected frame from the NN to the host via XLink
nn_xout = p.create(dai.node.XLinkOut)
nn_xout.setStreamName("nn")
nn.out.link(nn_xout.input)

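# The NN's passthrough output re-emits the exact frame inference ran on, so its
# sequence number and timestamp can be matched against the frames sent from the host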
pass_xout = p.create(dai.node.XLinkOut)
pass_xout.input.setQueueSize(2)
pass_xout.input.setBlocking(False)
pass_xout.setStreamName("pass")
nn.passthrough.link(pass_xout.input)


# Pipeline is defined, now we can connect to the device
with dai.Device(p) as device:
    qNn = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    qin = device.getInputQueue("xin")
    qPass = device.getOutputQueue(name="pass", maxSize=2, blocking=False)
    shape = (3, SHAPE, SHAPE)

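    # Decode the NN output: it arrives as a flat FP16 buffer, so reshape it to
    # CHW, transpose to HWC and cast to uint8 for display with OpenCV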
    def get_frame(imfFrame, shape):
        return np.array(imfFrame.getData()).view(np.float16).reshape(shape).transpose(1, 2, 0).astype(np.uint8)

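    # Host-side frame source: any local video file (or a webcam index) works here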
    cap = cv2.VideoCapture("chinese_traffic.mp4")

    i = 0
    while cap.isOpened():
        i += 1
        ok, frame = cap.read()
        if not ok:
            break

        tstamp = time.monotonic()
        nn_data = dai.NNData()
        nn_data.setLayer("input", to_planar(frame, (SHAPE, SHAPE)))
        nn_data.setTimestamp(tstamp)
        nn_data.setSequenceNum(i)
        qin.send(nn_data)

        cv2.imshow("Laplacian edge detection", get_frame(qNn.get(), shape))
        cv2.imshow("Color", frame)

        nn_pass = qPass.get()
        print(nn_pass.getSequenceNum(), nn_pass.getTimestamp())

        if cv2.waitKey(1) == ord('q'):
            break
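
    # Clean up once the video ends or 'q' is pressed
    cap.release()
    cv2.destroyAllWindows()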