---
# DO NOT remove the --- at the top of the file

# REMEMBER to always quote any option containing a {[secret]} or {{variable}} ->
#my_custom_variable: {{s_path}}/to_a_file.txt <---- INCORRECT
#some_thing: {[secrets]} are cool <---- INCORRECT

#my_custom_variable: '{{s_path}}/to_a_file.txt' <---- CORRECT
#some_thing: '{[secrets]} are cool' <---- CORRECT


#######################################
# ------ [ GENERAL SECTION ] ------
#######################################
# LEAVE base_data_path AS THE FIRST KEY (or as high up as possible); this enables the use of
# {{base_data_path}} when building all the other paths.
# This is where mlapi is installed. (Default: .)
# Example if on a host without neo-ZMES: /home/me/.opt/mlapi
# Example if on the same host as neo-ZMES: /var/lib/zmeventnotification/mlapi

# ***** PLEASE MAKE THIS AN ABSOLUTE PATH *****
base_data_path: /var/lib/zmeventnotification

# The secret key that will be used to sign
# JWT tokens. Make sure you change the value
# in your mlapisecrets.yml
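# A hedged tip: any sufficiently long random string should work as the secret;
# one way to generate one (stock Python, no extra packages) is:
#   python3 -c "import secrets; print(secrets.token_urlsafe(32))"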
mlapi_secret_key: '{[MLAPI_SECRET_KEY]}'


# folder where images will be uploaded
# (Default: ./images)
image_path: '{{base_data_path}}/images'

# folder where the user DB will be stored (Default: ./db)
db_path: '/config/db'

# Where the MLAPI secrets file is located if you are using {[secrets]}
secrets: '/etc/zm/zm_secrets.yml'
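# A minimal sketch of what that secrets file might contain, assuming it is
# plain 'key: value' YAML. The key names below are the ones referenced
# elsewhere in this config; the values are placeholders:
#   MLAPI_SECRET_KEY: some-long-random-string
#   mlapi_one_key: shared-key-matching-the-zmes-side
#   PLATEREC_ALPR_KEY: your-platerecognizer-api-key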

# add/override the object with the highest confidence from the same_model_sequence_strategy
# run into the detections (Default: no)
# *** EXPERIMENTAL WIP ***
# same_model_high_conf: yes

# sequence of models to run for detection; the models run in the order you specify.
model_sequence: object, face
#model_sequence: object, face, alpr
#model_sequence: face, object

# Frames to run detections on. Default is snapshot, alarm, snapshot
frame_set: snapshot, alarm, snapshot
#frame_set: snapshot, 70, snapshot, 120, 150, alarm

### STRATEGIES ###
# first - stop after finding a detection that makes it through filtering
# most - whichever frame/sequence has the most labels detected
# most_unique - whichever frame/sequence has more unique matches (a dog and a person would beat 3 persons detected)
# most_models (frame only) - whichever frame has more than 1 model detected in it
# ---- an object and a face detection beats just an object detection, even if the object detection has 6 matches
# union (frame only) - chain together the matches from one frame to the next

# Frame strategy -> first, most, most_models, most_unique, union
frame_strategy: first
# When running the same model and you have more than 1 sequence in that model, this is the strategy:
# first, most, most_unique
same_model_sequence_strategy: most

zmes_keys:
  # Format -> 'name of zmes host (can be anything as long as they both match in mlapiconfig and objectconfig)': '<key>',
  mlapi_one: '{[mlapi_one_key]}'

# If yes, it will attempt to hide all traces of your portal URL in logs, as well as hide/obfuscate tokens and passwords.
# With this you can copy and paste logs without having to comb through and sanitize sensitive information.
sanitize_logs: no

# The string to show when the logs are sanitized, instead of the sensitive info (Default: <sanitized>)
#sanitize_str: <obfuscated>

# port that mlapi will listen on. (Default: 5000)
port: 5000

# Maximum # of processes that will be forked
# to handle requests. Note that each process will
# have its own copy of the model, so memory can
# build up very quickly.
# This number also dictates how many requests will be executed in parallel;
# the rest will be queued.
# If you are using bjoern, MAKE SURE processes is 1.
# For now, keep this at 1 if you are on a GPU and using flask.
# Seems to be useful if using CPU only?
processes: 1

# WSGI framework; flask has the option to handle HTTPS, bjoern does not
# (this module does not make use of flask HTTPS anyway, to clarify)
# (Default: flask)
#wsgi_server: bjoern


# This setup will keep debug logging on for now. Explore the TERM helpers.txt file in the 'tools' dir and the shell
# functions 'es.debug.objdet' and 'es.baredebug.objdet' to see how to use them. I also recommend installing 'bat' for Linux
# and using the aliases that are piped out to bat to view the logs in color coded and themed output.
pyzm_overrides:
  # levels
  log_level_syslog: 5
  log_level_file: 5
  log_level_debug: 5
  # don't log to the DB (-5)
  log_level_db: -5
  # log levels -> 1 dbg/print/blank, 0 info, -1 warn, -2 err, -3 fatal, -4 panic, -5 off (only for log_debug_file)
  log_debug_file: 1
  log_debug: True
  log_debug_target: _zm_mlapi|_zmes


# Name of the user/group running mlapi (same as the user in the service file). Default: www-data or apache, depending on the system.
# If mlapi is on a system where ZoneMinder is not installed, set this user to whoever is running the mlapi command/service file.
# If ZoneMinder is installed, it will use ZMLogger by default with www-data or apache.
# NOTE ** If ZM is not installed, set log_user and log_group to the user running mlapi, or log rotation will not work correctly.

#log_user: MyOwnUserName
#log_group: MyOwnGroupName

# Path to where you want the log file stored; not the name of the file, the directory (Default: {{base_data_path}}/logs)
log_path: /var/log/zm

# Name of the logfile. DO NOT add a trailing '.log'. Default: zm_mlapi
#log_name: mlapi-host1



# You can now limit the # of detection processes
# per target processor. If not specified, the default is 1.
# Other detection processes will wait to acquire the lock.
# This is important if you are using the mobiledet TPU model followed immediately by the mobilenet v2 TPU model:
# if you have tpu_max_processes set to 1 and try to load both mobiledet and mobilenetv2, it will time out and error. Set to 3+ for tpu and gpu.
cpu_max_processes: 3
tpu_max_processes: 3
gpu_max_processes: 3
# Time to wait, in seconds, for a processor to be free. Default is 120 (2 mins)
cpu_max_lock_wait: 120
tpu_max_lock_wait: 30
gpu_max_lock_wait: 60

############# DEPRECATED / UNSUPPORTED #############
# RESIZE is sent by zm_detect in its 'stream options'. This prevents bounding boxes from being messed up due to resizing.
# ALLOW_SELF_SIGNED - This is no longer supported; neo-ZMES will send this option along with the request.
###################################################################

# Instead of specifying _polygonzone in the per monitor overrides, you can import the zones you already created in ZM
import_zm_zones: no
# This only works when you import ZM zones (activating it will, by default, import ZM zones):
# the detected object is only a match if the object is inside the zone that raised the alarm.
only_triggered_zm_zones: no

# This saves the previous detection results and compares the new detection to them. You can specify how much
# an object has to have moved from its previously detected position to not be filtered out. This helps if you have a car
# in the driveway and you keep being notified about a car in the driveway. ** The new logic IS EXPERIMENTAL **
# When the new logic is polished, it will work much better.
match_past_detections: no
# The max difference in area from the previously detected object, if match_past_detections is on.
# Can also be specified in px, like 300px. Default is 5%. Basically, bounding boxes of the same
# object can differ ever so slightly between detections. Contributor @neillbell put in this PR
# to calculate the difference in areas and, based on his tests, 5% worked well. YMMV. Change it if needed.
# So if the script detects a car in the same spot but the bounding boxes are not in the exact same position, if it's
# within 5% of its previously detected position it will be considered 'in the same spot' and you won't be notified.
# Note: You can specify label/object specific max_diff_areas as well. If present, they override this value.
# example:
# person_past_det_max_diff_area: 5%
# car_past_det_max_diff_area: 5000px
past_det_max_diff_area: 5%

# This is the maximum size a detected object can have compared to the whole image. You can specify it in px or %.
# This is pretty useful to eliminate bogus detections. In my case, depending on shadows and other lighting conditions,
# I sometimes see a "car" or "person" detected that covers most of my driveway view. That is practically impossible,
# and therefore I set mine to 70% because I know any valid detected object cannot be larger than 70% of the image.

#max_detection_size: 90%

# How much area the detected object must take up inside the polygon/zone; also has per object settings.
# person_contained_area: 50% means a person must have 50% of its bounding box's area inside the polygon or zone.
# 1 pixel will behave like the source repo's version: any part of the detected object inside the polygon/zone is a match.
contained_area: 1px
#contained_area: 10%


# This is a useful debugging trick. If you are chaining models and want to know which
# model detected an object, make this yes. When yes, it will prefix the model name before the
# detected object. Example: instead of 'person', it will say 'person(yolo[gpu])' or 'person(tpu)'
show_models: no
###########################################
# ------ [ OBJECT MODEL SECTION ] ------
###########################################

object_detection_pattern: (person|car|motorbike|bus|truck|boat|dog|cat)
object_min_confidence: 0.6


# Google Coral
# The mobiledet model came out in Nov 2020 and is supposed to be faster and more accurate, but YMMV
coral_models: "{{base_data_path}}/models/coral_edgetpu"
# Newer models (EfficientDet-Lite 3 and TF2 MobileNet v2)
tpu_efficientdet_lite3: '{{coral_models}}/efficientdet_lite3_512_ptq_edgetpu.tflite'
tpu_tf2_mobilenetv2: '{{coral_models}}/tf2_ssd_mobilenet_v2_coco17_ptq_edgetpu.tflite'

tpu_object_weights_mobiledet: '{{coral_models}}/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite'
tpu_object_weights_mobilenetv2: '{{coral_models}}/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite'
tpu_object_labels: '{{coral_models}}/coco_indexed.names'
tpu_object_framework: coral_edgetpu
tpu_object_processor: tpu
tpu_min_confidence: 0.6

yolo4_models: '{{base_data_path}}/models/yolov4'
# Yolo v4 on GPU (falls back to CPU if no GPU)
yolo4_object_weights: '{{yolo4_models}}/yolov4.weights'
yolo4_object_labels: '{{yolo4_models}}/coco.names'
yolo4_object_config: '{{yolo4_models}}/yolov4.cfg'
yolo4_object_framework: opencv
yolo4_object_processor: gpu
# Use half precision floating point as the target backend for yolo; on newer cards this may decrease your inference time.
# Try without this enabled first a few times to get a baseline, then enable it to see if detections are faster.
# THIS SETTING ONLY APPLIES TO GPU ACCELERATED OPENCV.
# Read up on 'half precision floating point' and 'CUDA target FP16'.
# ** NOTE: THIS IS EXPERIMENTAL. If you see 'NaN' errors in yolo logs, DISABLE this **
#fp16_target: no

# Yolo v3 on GPU (falls back to CPU if no GPU)
yolo3_object_weights: '{{base_data_path}}/models/yolov3/yolov3.weights'
yolo3_object_labels: '{{base_data_path}}/models/yolov3/coco.names'
yolo3_object_config: '{{base_data_path}}/models/yolov3/yolov3.cfg'
yolo3_object_framework: opencv
yolo3_object_processor: gpu

# Tiny Yolo v4 on GPU (falls back to CPU if no GPU)
tinyyolo_object_config: '{{base_data_path}}/models/tinyyolov4/yolov4-tiny.cfg'
tinyyolo_object_weights: '{{base_data_path}}/models/tinyyolov4/yolov4-tiny.weights'
tinyyolo_object_labels: '{{base_data_path}}/models/tinyyolov4/coco.names'
tinyyolo_object_framework: opencv
tinyyolo_object_processor: gpu
###########################################
# ------ [ FACE MODEL SECTION ] ------
###########################################
tpu_face_weights_mobilenetv2: '{{coral_models}}/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite'
face_detection_pattern: .*
# cpu or gpu; if gpu isn't available it will default to cpu. You can force CPU usage if you want.
face_dlib_processor: gpu

face_detection_framework: dlib
face_recognition_framework: dlib
face_num_jitters: 0
face_upsample_times: 0
face_model: cnn
face_train_model: cnn
# 0.5 and lower: more strict; start @ 0.5 and test slowly (Default: 0.6)
face_recog_dist_threshold: 0.6
# I do not recommend changing the algo
face_recog_knn_algo: ball_tree
known_images_path: '{{base_data_path}}/known_faces'
unknown_images_path: '{{base_data_path}}/unknown_faces'
unknown_face_name: Unknown_Face
save_unknown_faces: no
save_unknown_faces_leeway_pixels: 100

###########################################
# ------ [ ALPR MODEL SECTION ] ------
###########################################
# regex pattern; specify plate numbers!
alpr_detection_pattern: .*

#-- Many of the ALPR providers offer both a cloud version
#-- and a local SDK version. Sometimes the local SDK format differs from
#-- the cloud instance. Set this to local or cloud. (Default: cloud)
# alpr_api_type: local

# -----| If you are using plate recognizer |------
alpr_service: plate_recognizer

#-- If you want to host a local SDK https://app.platerecognizer.com/sdk/
#alpr_url: http://192.168.1.21:8080/alpr

#-- Plate recognizer: replace with your API key
alpr_key: '{[PLATEREC_ALPR_KEY]}'

#-- If yes, then it will log usage statistics of the ALPR service
platerec_stats: yes

#-- If you want to specify regions. See http://docs.platerecognizer.com/#regions-supported
platerec_regions: [ 'ca' ]

#-- minimum confidence for actually detecting a plate
platerec_min_dscore: 0.1

#-- minimum confidence for the translated text - called OCR in its docs
platerec_min_score: 0.2


# ----| If you are using openALPR Cloud API |-----
#alpr_service: open_alpr
#alpr_key: '{[OPENALPR_ALPR_KEY]}'
#-- For an explanation of params, see http://doc.openalpr.com/api/?api=cloudapi
#openalpr_recognize_vehicle: 1
#openalpr_country: us
#openalpr_state: ca
#-- openalpr returns percents, but we convert to between 0 and 1
#openalpr_min_confidence: 0.3


# ----| If you are using openALPR command line |-----
#alpr_service: open_alpr_cmdline
openalpr_cmdline_binary: alpr
#-- Do an 'alpr -help' to see options, then plug them in here,
#-- like say '-j -p ca -c US' etc.
#-- YOU MUST keep the -j: it outputs JSON for ZMES to parse.
#-- Note that alpr_detection_pattern is honored.
#-- For the rest, just stuff them in the cmd line options.
openalpr_cmdline_params: -j -d
openalpr_cmdline_min_confidence: 0.3
# *** Remember to play around with the openalpr SDK .conf files (you can set the detector, e.g. lbpgpu) and also have it
# distort/resize/blur the image x number of times until it finds a match.
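# A hedged sketch of the openalpr.conf tweaks the note above refers to
# (option names from the openalpr SDK docs; values are examples only,
# verify against your installed version):
#   detector = lbpgpu
#   analysis_count = 2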


###########################################
# ------ [ PER MONITOR OVERRIDES SECTION ] ------
###########################################
# You can override ALMOST any parameter on a per monitor basis; there are some illegal keys that would cause unexpected behaviour.

monitors:
  6942069:
    # Front Main Stream
    # TO IDENTIFY POLYGONS, make sure they end with _polygonzone or _polygon_zone
    # 1080p polygon
    # front_yard_polygonzone: 0,427 1085,261 1075,200 1912,448 1912,1071 0,1079
    # 4K polygon
    front_yard_polygonzone: 0,877 2170,553 3822,1131 3822,2141 0,2159
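    # A hedged note on the two polygons above: the points are pixel coordinates,
    # so they scale linearly with resolution. Halving each 4K (3840x2160)
    # coordinate gives roughly the 1080p polygon shown, e.g. 2170,553 -> 1085,~276.
    # Redraw or rescale the polygon if your monitor's detection resolution changes.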
    front_yard_zone_detection_pattern: (person|dog|cat)
    object_detection_pattern: (person|dog|cat)
    frame_set: snapshot,70,snapshot,140,210,alarm,280,350,430
    model_sequence: object
    # sometimes it detects a large 'person'; this should stop that.
    person_max_detection_size: 65%

    person_min_confidence: 0.4732
    #ignore_past_det_labels: ['dog', 'cat']
    #match_past_detections: yes
    #past_det_max_diff_area: 10%
    #past_det_max_diff_area: 6784px
    #max_detection_size: 90%
    #car_past_det_max_diff_area: 45%
    #dog_min_confidence: 0.60
    #cat_min_confidence: 0.60
    #car_min_confidence: 0.60
    #truck_min_confidence: 0.60
    #person_contained_area: 44%

    # FUTURE DATA STRUCTURE FOR DEFINED ZONES/POLYGONS
    defined zones:
      front yard:
        pattern: (person|dog|cat)

        polygons:
          # specify the polygon points in clockwise order
          # currently supported keys for resolution are listed here in the example config
          4320:
          2160: 0,877 2170,553 3822,1131 3822,2141 0,2159 # AKA 4K
          1440:
          1080: 0,427 1085,261 1075,200 1912,448 1912,1071 0,1079
          720:
          480:
          320:


###########################################
# ------ [ MACHINE LEARNING SEQUENCES SECTION ] ------
###########################################
# 'smart_fps_thresh' -> suppose you have a frame_set of 'alarm,snapshot,120,180,240,320,380,440,snapshot' and it is a LIVE
# event that will end up being 45 seconds long, and frame_set calls a frame ID that is 'out of bounds',
# i.e. the event frame buffer is only @ 275 and frame_set requested 320; that's an overage of 45 frames. If your FPS is 10, that's
# 4.5 seconds over. If smart_fps_thresh is set to 8 (4.5 seconds is inside the 'threshold'), it will wait around and
# attempt to keep grabbing the frame, up to 3 attempts later, with a wait time calculated to be roughly 8 seconds.
# The default action is to set the frame ID to the last available frame and process that frame instead; that is what
# will happen if the frame ID called is 8+ seconds' worth of frames away (FPS is calculated in the script).
# I think 4-8 is a good compromise between speed and being able to process a large, spaced out frame set like the example above.
smart_fps_thresh: 5
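# A quick worked example using the numbers from the comment above: buffer at
# frame 275, frame 320 requested -> 45 frames short; at 10 FPS that is 4.5 s.
# With smart_fps_thresh: 8 it would wait and retry; with the value of 5 set
# here it would also retry (4.5 < 5), but at 4 it would fall back to the last
# available frame instead.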

# If enabled, it will not grab exclusive locks before inferring;
# locking seems to cause issues on some unique file systems.
disable_locks: no

stream_sequence:
  # 'most_models' (object+face+alpr), 'most', 'most_unique', 'first'
  frame_strategy: '{{frame_strategy}}'
  frame_set: '{{frame_set}}'
  # ANY of the delay options can be set as xx or xx.yy (finer precision)
  # contig attempts and sleep (batches of tries to grab the matching frame)
  contig_frames_before_error: 2
  delay_between_attempts: 2
  # Per each frame
  max_attempts: 3 # attempts per frame (this is a 'batch' for the setting above)
  # delay_between_frames: 0.22532 # frame_set
  # delay_between_snapshots takes precedence over delay_between_frames if there would be a delay from both
  delay_between_snapshots: 1 # between snapshot frames: the previous frame has to be a snapshot and so does the current one
  smart_fps_thresh: '{{smart_fps_thresh}}'

  # Save every frame that is sent to the detection models. If you are processing a video or are getting weird results,
  # turn this on and review the frames in the 'save_frames_dir' directory.
  # For the time being it is your responsibility to clean up the directory after you are done (script to do daily clean ups coming)
  save_frames: 'no' # (Default: no)
  save_frames_dir: # (Default: /tmp) - directory to save the 'save_frames' to

  # When controlling a video file
  # start_frame: 1
  # frame_skip: 1
  # max_frames: 0

  # If it is an event, download the mp4 file for the event and process that instead of requesting frame by frame
  # from the API. *** NOTE: You must have the 'H264 Passthrough' video writer enabled in the monitor settings for this to work
  # pre_download: true
  # pre_download_dir: # (Default: /tmp) - directory to save the frames into

ml_sequence:
  general:
    model_sequence: '{{model_sequence}}'
    disable_locks: '{{disable_locks}}'
    match_past_detections: '{{match_past_detections}}'
    past_det_max_diff_area: '{{past_det_max_diff_area}}'
    # ignore_past_detection_labels: ['dog', 'cat']
    # when matching past detections, names in a group are treated the same
    # also adds <alias>_min_confidence and <alias>_past_det_max_diff_size
    # example -> vehicles_min_confidence: 0.66
    aliases:
      vehicles: [ 'car', 'bus', 'truck', 'boat', 'motorcycle' ]
      plants: [ 'broccoli', 'pottedplant', 'potted_plant' ]
      animals: [ 'dog', 'cat', 'mouse', 'horse' ]
    # NOTE! per label overrides go here in 'general'
    # person_min_confidence: '{{person_min_confidence}}'
    # car_min_confidence: '{{car_min_confidence}}'
    # dog_min_confidence: '{{dog_min_confidence}}'
    # person_contained_area: '{{person_contained_area}}'
    # car_contained_area: '{{car_contained_area}}'
    # dog_contained_area: '{{dog_contained_area}}'
    # person_past_det_max_diff_area: '{{person_past_det_max_diff_area}}'
    # car_past_det_max_diff_area: '{{car_past_det_max_diff_area}}'
    # dog_past_det_max_diff_area: '{{dog_past_det_max_diff_area}}'
    # car_max_detection_size: '{{car_max_detection_size}}'
    # dog_max_detection_size: '{{dog_max_detection_size}}'
    # person_max_detection_size: '{{person_max_detection_size}}'
  object:
    general:
      object_detection_pattern: '{{object_detection_pattern}}'
      # 'first', 'most', 'most_unique', 'union'
      same_model_sequence_strategy: '{{same_model_sequence_strategy}}'
      # HAS to be inside object->general, as it only applies to object detection
      contained_area: '{{contained_area}}'
    sequence:
      # First run on TPU with higher confidence
      - name: 'coral::SSD-Lite MobileDet 312x312'
        enabled: 'no'
        object_weights: '{{tpu_object_weights_mobiledet}}'
        object_labels: '{{tpu_object_labels}}'
        object_min_confidence: '{{tpu_min_confidence}}'
        object_framework: '{{tpu_object_framework}}'
        tpu_max_processes: '{{tpu_max_processes}}'
        tpu_max_lock_wait: '{{tpu_max_lock_wait}}'
        max_detection_size: '{{max_detection_size}}'
      # Second, try MobileNetv2 object detection to compare to MobileDet results
      - name: 'coral::MobileNETv2-SSD 300x300'
        enabled: 'no'
        object_weights: '{{tpu_object_weights_mobilenetv2}}'
        object_labels: '{{tpu_object_labels}}'
        object_min_confidence: '{{tpu_min_confidence}}'
        object_framework: '{{tpu_object_framework}}'
        tpu_max_processes: '{{tpu_max_processes}}'
        tpu_max_lock_wait: '{{tpu_max_lock_wait}}'
        max_detection_size: '{{max_detection_size}}'
        model_height: 300
        model_width: 300
      # New models
      - name: 'coral::MobileNETv2-SSD TensorFlow 2.0 300x300'
        enabled: 'yes'
        object_weights: '{{tpu_tf2_mobilenetv2}}'
        object_labels: '{{tpu_object_labels}}'
        object_min_confidence: '{{tpu_min_confidence}}'
        object_framework: '{{tpu_object_framework}}'
        tpu_max_processes: '{{tpu_max_processes}}'
        tpu_max_lock_wait: '{{tpu_max_lock_wait}}'
        max_detection_size: '{{max_detection_size}}'
        model_height: 300
        model_width: 300

      - name: 'coral::EfficientDet-Lite 3 512x512'
        enabled: 'yes'
        object_weights: '{{tpu_efficientdet_lite3}}'
        object_labels: '{{tpu_object_labels}}'
        object_min_confidence: '{{tpu_min_confidence}}'
        object_framework: '{{tpu_object_framework}}'
        tpu_max_processes: '{{tpu_max_processes}}'
        tpu_max_lock_wait: '{{tpu_max_lock_wait}}'
        max_detection_size: '{{max_detection_size}}'
        model_height: 512
        model_width: 512

      - name: 'DarkNet::v4 Pre-Trained'
        enabled: 'yes'
        object_config: '{{yolo4_object_config}}'
        object_weights: '{{yolo4_object_weights}}'
        object_labels: '{{yolo4_object_labels}}'
        object_min_confidence: '{{object_min_confidence}}'
        object_framework: '{{yolo4_object_framework}}'
        object_processor: '{{yolo4_object_processor}}'
        gpu_max_processes: '{{gpu_max_processes}}'
        gpu_max_lock_wait: '{{gpu_max_lock_wait}}'
        cpu_max_processes: '{{cpu_max_processes}}'
        cpu_max_lock_wait: '{{cpu_max_lock_wait}}'
        # only applies to GPU; default is FP32. *** EXPERIMENTAL ***
        # fp16_target: '{{fp16_target}}'
        # at the moment this is a global setting, turned on by simply setting it to yes
        show_models: '{{show_models}}'

      # AWS Rekognition object detection
      # More info: https://medium.com/@michael-ludvig/aws-rekognition-support-for-zoneminder-object-detection-40b71f926a80
      - name: 'AWS Rekognition (PAID)'
        enabled: 'no'
        object_framework: 'aws_rekognition'
        object_min_confidence: '0.7'
        # AWS region, unless configured otherwise, e.g. in ~www-data/.aws/config
        aws_region: 'us-east-1'
        # AWS credentials from /etc/zm/secrets.ini,
        # unless running on an EC2 instance with an instance IAM role (which is preferable)
        aws_access_key_id: '{[AWS_ACCESS_KEY_ID]}'
        aws_secret_access_key: '{[AWS_SECRET_ACCESS_KEY]}'
        # no other parameters are required

  alpr:
    general:
      # every frame you send is counted as an API hit if using the cloud API
      same_model_sequence_strategy: 'first'
      # pre_existing_labels: ['car', 'motorbike', 'bus', 'truck', 'boat']
      # can make it a reg-ex for certain license plate numbers
      alpr_detection_pattern: '{{alpr_detection_pattern}}'
    sequence:
      # Try openALPR locally first (tweak with per camera openalpr.conf files, pre-warp and calibration, etc.)
      # also remember masks for timestamps etc.; per camera config files are powerful
      - name: 'openALPR Command Line'
        # enabled: 'no'
        alpr_service: 'open_alpr_cmdline'
        openalpr_cmdline_binary: '{{openalpr_cmdline_binary}}'
        openalpr_cmdline_params: '{{openalpr_cmdline_params}}'
        openalpr_cmdline_min_confidence: '{{openalpr_cmdline_min_confidence}}'
        max_size: '1600'

      - name: 'Platerecognizer Cloud Service'
        enabled: 'no'
        # pel_any means as long as there are any detections; pel_none means only if there are no detections yet
        # pre_existing_labels: 'pel_any'
        # pre_existing_labels: ['car', 'motorbike', 'bus', 'truck', 'boat']
        alpr_api_type: 'cloud'
        alpr_service: 'plate_recognizer'
        alpr_key: '{{alpr_key}}'
        platerec_stats: '{{platerec_stats}}'
        platerec_min_dscore: '{{platerec_min_dscore}}'
        platerec_min_score: '{{platerec_min_score}}'
        # max_size: '1600'
        platerec_payload:
          regions: [ 'ca' ]
          # camera_id: 12
        # platerec_config:
        #   region: 'strict'
        #   mode: 'fast'

  face:
    general:
      face_detection_pattern: '{{face_detection_pattern}}'
      # combine results below
      same_model_sequence_strategy: 'union'
    sequence:
      - name: 'Face Detection -> coral::MobileNETv2-SSD 320x320'
        enabled: 'no'
        face_detection_framework: 'tpu'
        face_weights: '{{tpu_face_weights_mobilenetv2}}'
        face_min_confidence: 0.3
        model_height: 320
        model_width: 320

      - name: 'DLib::Face Detection/Recognition'
        enabled: 'yes'
        # Force CPU detection if you have a GPU (previously, dlib used the GPU whenever it was compiled with CUDA support, regardless)
        # face_dlib_processor: cpu

        # If you use TPU detection first, we can run this ONLY if the TPU detects a face first
        # pre_existing_labels: [ 'face' ]
        save_unknown_faces: '{{save_unknown_faces}}'
        save_unknown_faces_leeway_pixels: '{{save_unknown_faces_leeway_pixels}}'
        face_detection_framework: '{{face_detection_framework}}'
        known_images_path: '{{known_images_path}}'
        unknown_images_path: '{{unknown_images_path}}'
        face_model: '{{face_model}}'
        face_train_model: '{{face_train_model}}'
        face_recog_dist_threshold: '{{face_recog_dist_threshold}}'
        face_num_jitters: '{{face_num_jitters}}'
        face_upsample_times: '{{face_upsample_times}}'
        gpu_max_processes: '{{gpu_max_processes}}'
        gpu_max_lock_wait: '{{gpu_max_lock_wait}}'
        cpu_max_processes: '{{cpu_max_processes}}'
        cpu_max_lock_wait: '{{cpu_max_lock_wait}}'
        max_size: 800