# Source: Pastebin paste — DepthAI 12MP full-FOV rotated-crop MobileNet-SSD demo.
  1. import depthai as dai
  2. import json
  3. from pathlib import Path
  4. import blobconverter
  5. import cv2
  6. import numpy as np
  7.  
  8. pipeline = dai.Pipeline()
  9. # Define source and output
  10. camRgb = pipeline.create(dai.node.ColorCamera)
  11. camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_12_MP)
  12. camRgb.setInterleaved(False)
  13. camRgb.setIspScale(1,5) # 4056x3040 -> 812x608
  14. camRgb.setPreviewSize(812, 608)
  15. camRgb.setIspNumFramesPool(2)
  16. camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
  17. # Slightly lower FPS to avoid lag, as ISP takes more resources at 12MP
  18. camRgb.setFps(10)
  19.  
  20.  
  21. manip = pipeline.create(dai.node.ImageManip)
  22. manip.setMaxOutputFrameSize(812 * 608 * 3) # 300x300x3
  23. rgbRr = dai.RotatedRect()
  24. rgbRr.center.x, rgbRr.center.y = camRgb.getPreviewWidth() // 2, camRgb.getPreviewHeight() // 2
  25. rgbRr.size.width, rgbRr.size.height = camRgb.getPreviewHeight(), camRgb.getPreviewWidth()
  26. rgbRr.angle = 90
  27. manip.initialConfig.setCropRotatedRect(rgbRr, False)    #manip.initialConfig.setCropRect(0, 0, 1, 1)
  28. camRgb.preview.link(manip.inputImage)
  29.  
  30. resizeManip = pipeline.create(dai.node.ImageManip)
  31. resizeManip.initialConfig.setResizeThumbnail(300, 300)
  32. resizeManip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
  33. manip.out.link(resizeManip.inputImage)
  34.  
  35. xoutIsp = pipeline.create(dai.node.XLinkOut)
  36. xoutIsp.setStreamName("isp")
  37. manip.out.link(xoutIsp.input)
  38.  
  39. #manip.initialConfig.setRotation(-90)
  40. #manip.initialConfig.setRotationDegrees(90)
  41.  
  42.  
  43. # NN to demonstrate how to run inference on full FOV frames
  44. nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
  45. nn.setConfidenceThreshold(0.5)
  46. nn.setBlobPath(blobconverter.from_zoo(name='mobilenet-ssd'))
  47. resizeManip.out.link(nn.input)
  48.  
  49. xoutNn = pipeline.create(dai.node.XLinkOut)
  50. xoutNn.setStreamName("nn")
  51. nn.out.link(xoutNn.input)
  52.  
  53. xoutRgb = pipeline.create(dai.node.XLinkOut)
  54. xoutRgb.setStreamName("pass")
  55. nn.passthrough.link(xoutRgb.input)
  56.  
  57. with dai.Device(pipeline) as device:
  58.     qIsp = device.getOutputQueue(name="isp", maxSize=4, blocking=False)
  59.     qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
  60.     qPass = device.getOutputQueue(name="pass", maxSize=4, blocking=False)
  61.  
  62.     detections = []
  63.     frame = None
  64.  
  65.     def frameNorm(frame, bbox):
  66.         normVals = np.full(len(bbox), frame.shape[0])
  67.         normVals[::2] = frame.shape[1]
  68.         return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
  69.  
  70.     def displayFrame(name, frame):
  71.         color = (255, 0, 0)
  72.         for detection in detections:
  73.             bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
  74.             cv2.putText(frame, str(detection.label), (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
  75.             cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
  76.             cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
  77.         # Show the frame
  78.         cv2.imshow(name, frame)
  79.  
  80.     while True:
  81.         if qDet.has():
  82.             detections = qDet.get().detections
  83.         if qPass.has():
  84.             frame = qPass.get().getCvFrame()
  85.  
  86.         if frame is not None:
  87.             displayFrame("pass", frame)
  88.  
  89.         if qIsp.has():
  90.             isp_frame = qIsp.get().getCvFrame()
  91.             print(isp_frame.shape)
  92.             cv2.imshow("isp", isp_frame)
  93.         if cv2.waitKey(1) == ord('q'):
  94.             break
  95.  
  96.