---
# Configuration file for object detection
# *** NOTE: zmeventnotification.ini and secrets.ini are for configuring the zmeventnotification.pl (Perl) daemon
# that reads the shared memory to find new events that are happening in ZM. objectconfig.yml and its secrets.yml
# are strictly for the object detection part of ZMES.

# NOTE: Most of these base config keys can be overridden
# on a per-monitor basis if you want. Just duplicate
# the key inside the correct monitor's section. Example:

# This is in the base config (not nested inside the 'monitors' section)
# my_thing: 'ABC Easy as one, two, three'
# my_other_thing: '{{my_thing}} - Ah, simple as Do Re Mi - ABC, one, two, three - Baby, you and me, girl'

# You can override my_thing in the per-monitor overrides to allow different things for different monitors using the same 'key'.
# *** NOTE: always quote the strings that {{vars}} or {[secrets]} are in, even if it is the only thing in the string.
#monitors:
#  # Notice this is nested inside the 'monitors' section
#  1:
#    # If the detection is running on monitor 1 then these options will be used instead of the base ones
#    my_thing: 'Generals gathered in their masses - Just like witches at black masses'
#    my_other_thing: "{{my_thing}} - Evil minds that plot destruction - Sorcerer of death's construction"

#  2:
#    my_thing: Hello
#    # NOTICE that the {{var}} is quoted even though it is all by itself
#    my_other_thing: '{{my_thing}}, World!'

###########################################
# ------ [ General SECTION ] ------
###########################################
# base data path for various files the Event Server + Object Detection 'hooks' need
# ** IMPORTANT: Use absolute paths for base_data_path - you can build all the other paths from base_data_path if you want.
base_data_path: /var/lib/zmeventnotification
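# For example, other paths further down in this file are built from it:
#   yolo4_models: '{{base_data_path}}/models/yolov4'
#   known_images_path: '{{base_data_path}}/known_faces'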

# This is an optional file.
# If specified, you can define tokens with secret values in that file
# and only refer to the tokens in your main config file. ** !SECRETS are now {[SECRETS]} **
secrets: /etc/zm/zm_secrets.yml
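
# A minimal sketch of what a secrets file can look like (assuming plain YAML
# key: value pairs; token names and values here are examples only). A token
# FOO defined there is referenced in this file as '{[FOO]}':
#
#   ZM_PORTAL: https://zm.example.com/zm
#   ZM_API_PORTAL: https://zm.example.com/zm/api
#   ZM_USER: zmes_user
#   ZM_PASSWORD: super_secret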

# Push notification customized script. See the provided gotify_zmes.sh for an example and what ARGS are passed.
# When install.sh was run it installed this example gotify script to the {{base_data_path}}/bin directory (you can modify install.sh to install to a different dir).
# It IS a working example, but you can make your own shell script and use the ARGS however you like. If you want the logger to record that
# your custom script was a success, you need to echo out a 0 for success or anything else for failure.
# (Default: no)
custom_push: no
custom_push_script: '{{base_data_path}}/bin/gotify_zmes.sh'
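
# Illustrative sketch of the custom script contract (see gotify_zmes.sh for the
# ARGS that are actually passed); the only hard requirement noted above is the
# final echo - 0 for success, anything else for failure:
#
#   #!/bin/bash
#   # ... send the notification however you like ...
#   echo 0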

# comma-separated list of monitor IDs to skip processing hooks on.
# It lives here so you don't have to restart zmeventnotification.pl for changes to take effect; at the moment it is re-read on every detection.
# This only applies to LIVE events; a PAST event will never skip the monitor.
#skip_mons : 1,6,9

# FOR DEBUGGING -- force LIVE EVENT logic
# force_live: yes


# If yes, it will attempt to hide all traces of your portal URL in logs as well as hide/obfuscate tokens and passwords.
# With this you can copy and paste logs without having to comb through and sanitize sensitive information.
# (Default: no)
sanitize_logs: no

# The string to show in sanitized logs instead of the sensitive info (Default: <sanitized>)
#sanitize_str: <obfuscated>

# This is a useful debugging trick. If you are chaining models and want to know which
# model detected an object, make this yes. When yes, it will prefix the model name before the
# detected object. Example: instead of 'person', it will say 'person(yolo[gpu])' or 'person(tpu)'
show_models: no

# Save 2 images to disk: the 'training' frame is the bare frame that ZM sends. You can then label the image and use it to train a custom model.
# The other frame is objdetect.jpg so you can 'compare' what the current model found with what you do/do not want it to find.
# Format is <event ID>-training-<frame ID>.jpg and <event ID>-compare-<frame ID>.jpg
# Default path is base_data_path/images; make sure the user www-data or apache has write permissions to the directory.
save_image_train: no
save_image_train_dir: /nas/images/yolo_train


# The logic has been re-written to be performant; things are skipped for speed if possible. This tells the script to evaluate all the logic even
# if the script finds that there is already a previous detection (and it is the exact same result as before).
# This allows the script to consider overwriting objdetect.jpg. For animations and pushover notifications to be forced
# you need to go to their specific section and enable the 'force_animation' or 'force_xxxx' options.
# (Default: no)
force_debug: no

# add the object with the highest confidence in same_model_sequence_strategy to the detections (Default: no)
# *** NOTE: EXPERIMENTAL!
#same_model_high_conf : yes

# sequence of models to run for detection; the sequence follows the order you specify.
model_sequence : object, face
#model_sequence : object, face, alpr
#model_sequence : face, object

# Frames to run detections on. Default is snapshot, alarm, snapshot
frame_set : snapshot, alarm, snapshot
#frame_set : snapshot, 70, snapshot, 120, 150, alarm


# You can now limit the # of detection processes
# per target processor. If not specified, the default is 1.
# Other detection processes will wait to acquire a lock.
# Be careful, as this can use up a lot of memory on your GPU (watch -n .5 nvidia-smi)
cpu_max_processes: 3
tpu_max_processes: 3
gpu_max_processes: 3

# Time in seconds to wait for a processor to be free before
# erring out. Default is 120 (2 minutes)
cpu_max_lock_wait: 100
tpu_max_lock_wait: 100
gpu_max_lock_wait: 100

# This setup will keep debug logging on for now; explore the TERM helpers.txt file in the 'tools' dir and the shell
# functions 'es.debug.objdet' and 'es.baredebug.objdet' to see how to use them. I also recommend installing 'bat' for Linux
# and using the aliases that are piped out to bat to view the logs in color-coded and themed output.
pyzm_overrides:
  # levels
  log_level_syslog: 5
  log_level_file: 5
  log_level_debug: 5
  # don't log to the DB (-5)
  log_level_db: -5
  # log levels -> 1 dbg/print/blank, 0 info, -1 warn, -2 err, -3 fatal, -4 panic, -5 off (only for log_debug_file)
  log_debug_file: 1
  log_debug: True
  log_debug_target: _zmes

# api portal is needed if you plan to use tokens to get images;
# requires ZM 1.33 or above
api_portal: '{[ZM_API_PORTAL]}'
# portal/user/password are needed if you plan on using ZM's legacy
# auth mechanism to get images
portal: '{[ZM_PORTAL]}'
user: '{[ZM_USER]}'
password: '{[ZM_PASSWORD]}'
# If you need basic auth to access ZM
#basic_user: user
#basic_password: password

# Self-signed HTTPS certs
allow_self_signed: yes


# Instead of specifying _polygonzone in the per-monitor overrides you can import the zones you already created in ZM
import_zm_zones: no
# This only works with imported ZM zones (enabling it implicitly enables import_zm_zones):
# a detected object is only a match if it is inside the zone that raised the alarm.
only_triggered_zm_zones: no
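
# Example of scoping detection per imported zone (the monitor ID and zone name
# 'driveway' here are hypothetical; see the <zone name>_zone_detection_pattern
# style override used in the monitors section below):
#
#   monitors:
#     1:
#       import_zm_zones: yes
#       only_triggered_zm_zones: yes
#       driveway_zone_detection_pattern: (person|car)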

# This saves the previous detection results and compares the new detection to them. You can specify how much
# an object has to have moved from its previously detected position to not be filtered out. This helps if you have a car
# in the driveway and you keep being notified about a car in the driveway. ** The new logic IS EXPERIMENTAL **
# and will work much better once polished.
match_past_detections: no
# The max difference in area from the previously detected object if match_past_detections is on.
# Can also be specified in px like 300px. Default is 5%. Basically, bounding boxes of the same
# object can differ ever so slightly between detections. Contributor @neillbell put in this PR
# to calculate the difference in areas and based on his tests, 5% worked well. YMMV. Change it if needed.
# So if the script detects a car in the same spot but the bounding boxes are not in the exact same position, as long as it is
# within 5% of its previously detected position it will be considered 'in the same spot' and you won't be notified.
# Note: You can specify label/object specific max_diff_areas as well. If present, they override this value.
# Example:
# person_past_det_max_diff_area: 5%
# car_past_det_max_diff_area: 5000px
past_det_max_diff_area: 5%

# This is the maximum size a detected object can have compared to the whole image. You can specify it in px or %.
# This is pretty useful to eliminate bogus detections. In my case, depending on shadows and other lighting conditions,
# I sometimes see a "car" or "person" detected that covers most of my driveway view. That is practically impossible
# and therefore I set mine to 70% because I know any valid detected object cannot be larger than 70% of the image.

#max_detection_size: 90%

# How much area the detected object must take up inside the polygon/zone; there are also per-object settings.
# person_contained_area: 50% means a person must have 50% of its bounding box's area inside the polygon or zone.
# 1 pixel will behave like the source repo's version: any part of the detected object inside the polygon/zone is a match.
contained_area: 1px
#contained_area: 10%

# This is the width to resize the image to before analysis is done; aspect ratio is preserved.
# Play around if you want, I have personally seen different results using resize. Remember that before the image is
# handed to the model to process, it is resized based on 'model_width' and 'model_height'. So resizing beforehand
# can bring interesting results. A whole number or 'no' is allowed; 123 is ok, 123.xx is not.
resize: no
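# e.g. resize frames to 800 px wide before detection (aspect ratio preserved):
#resize: 800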

# place a timestamp on objdetect.jpg
# this is helpful when storing events as mp4 and the frames do not have timestamps from ZM
picture_timestamp:
  enabled: no
  date format: '%Y-%m-%d %H:%M:%S'
  # appends the monitor name and ID to the timestamp
  monitor name: yes
  text color: (255,255,255) # BGR not RGB - Default: (255,255,255) aka white
  # background is the solid, colored rectangle that the timestamp text is printed on
  background: yes
  bg color: (0,0,0) # Default: (0,0,0) aka black

########################### REIMPLEMENT THESE
# set to yes if you want to remove images after analysis.
# Setting to yes is recommended to avoid filling up space;
# keep at no while debugging/inspecting masks.
# Note this does NOT delete debug images later
#delete_after_analyze: yes

# If yes, will write an image called <filename>-bbox.jpg as well,
# which contains the bounding boxes. This has NO relation to
# write_image_to_zm.
# Typically, if you enable delete_after_analyze you may
# also want to set write_debug_image to no.
#write_debug_image: no

# if yes, will write an image with bounding boxes.
# This needs to be yes to be able to write a bounding box
# image to ZoneMinder that is visible from its console
#write_image_to_zm: yes


# Adds the confidence percentage to the labels drawn onto the matching frame:
# (person) becomes (person 97%)
# hog/face always shows 100%
show_percent: yes

# Draw the polygon/zone on objdetect.jpg; this is handy to make sure that the polygons you defined
# in the per-monitor overrides are actually what you want them to be.
draw_poly_zone: no
# color to be used to draw the polygons you specified, BGR not RGB
poly_color: (100,0,255)
# thickness of the line used to draw the polygon/zone
poly_thickness: 2
# Draw red bounding boxes around objects that were filtered out (useful for debugging)
show_filtered_detections: no
# If this is yes, then objects that were filtered out due to being under the minimum confidence level will have red bounding
# boxes drawn around them IF you have show_filtered_detections enabled. I found this caused a lot of 'noise' so I made
# it configurable. This could be helpful if you are testing your own trained models.
show_conf_filtered: no

###########################################
# ------ [ HOME ASSISTANT ADD-ON SECTION ] ------
###########################################
# This is the main setting; you need to enable it to use per-monitor Home Assistant sensors to check their state.
# You can control whether pushover notifications are sent at all and also the 'cool down' time in between pushover messages.
# I made this for performance reasons: the old api push script has to wait for this script to reply and for the Perl script
# to use it. I tried AppDaemon and some other workarounds, but this solved all my problems.
# For now there are 2 types of sensors created using the 'Helpers': Bool and Input Text. Bool is the on/off switch. Text
# input is used for 'float' numbers (30.54, etc.); the number represents how long in seconds to not allow pushover
# notifications to be sent (per monitor).
# Enable add-on. Default: no
hass_enable: no
# set the schema here too (http:// or https://)
hass_server: '{[HA_SERVER]}'
# long lived token created for this purpose
hass_token : '{[HA_TOKEN]}'

# The gist is to create a bool and text input helper for each monitor that you can configure in the Home Assistant
# front end. Put the names of the sensors in each monitor's override section so the script queries the correct sensor.

# person sensor support is coming soon
#hass_people:
#  mike: 'person.mike'
#  maryanne: 'person.maryanne'

# HOME ASSISTANT add-on sensors. Make these the actual names of the sensors; if you specify them here this will be the
# 'global default'. You can override per monitor as well. If you don't use HA you can use push_cooldown to control
# the cooldown between pushover messages.

# hass_notify: input_boolean.driveway_pushover
# hass_cooldown: input_number.pushover_cooldown_driveway


# BE AWARE GOTIFY IS FASTER, albeit with fewer features. You could use gotify as the 'first message' to be alerted quickly.
# This is Pushover integrated directly into the detection script so it can be sent as fast as possible. By default it will
# look for a GIF first, then objdetect.jpg. If it doesn't find either, it goes for snapshot and then alarm. If you aren't
# creating animations, it will send a picture of the best matched frame with bounding boxes and labels. Set up push_token
# and push_key to get either a jpg or gif notification based on whether you create animations or not. There is also
# an advanced setup where you can send the JPG AND GIF in 2 separate messages, either to the same APP_TOKEN or the jpg
# notification to one and the gif to another. I have set up a URL link inside the pushover notifications that, when clicked,
# lets you view the event in a browser window using AUTH. AUTH defaults to using the same user/pass that you use for
# ZMES, or you can override that user and pass by setting push_user and push_pass.
# I recommend making an API user named 'pushover_url' or 'pushover_viewonly' and only giving it 'View' permission for
# Events and Stream. The credentials/token are inside the notification payload and it is sent over https to pushover.
###########################################
# ------ [ PUSHOVER ADD-ON SECTION ] ------
###########################################
# Enable the PushOver python add-on? (Default: no)
# Remember to turn off api_push in zmeventnotification.ini or you will receive 2 different messages
push_enable: no
# Force sending a pushover notification when debugging a PAST event
push_force: no
# Pushover default or only User Key and App Token
push_token: '{[PUSHOVER_APP_TOKEN]}'
push_key: '{[PUSHOVER_USER_KEY]}'

# Custom sound for pushover notifications (NOTE: has to be set up in your pushover account first) (Default: None)
#push_sound: tugboat

# -------------------------------------------------------------
# Show a clickable link to view the event in a browser. This is handy if the pushover notification goes out to
# a device without zmNinja installed; they can just click the link and view the event in a regular browser.
# NOTE: Your ZM server must be accessible externally for this to work correctly. It's super handy to just click the link.
# (Default: no)
push_url: no

# The ZM API user for the clickable URL link in the pushover notification. I HIGHLY recommend https on your ZM host,
# making a user with VIEW privileges of stream and events only, and using that for push_user and pass.
# Example: make a user named 'push_view' with VIEW privs only for STREAM and EVENT
push_user: '{[PUSHOVER_USER]}'
push_pass: '{[PUSHOVER_PASS]}'
# -------------------------------------------------------------

#-- Send a pushover notification if TPU or GPU errors are detected, i.e. a 'delegate' error for tpu or
#-- 'cant convert float() infinite to int()' or '(GPU-API-217)' for yolo. (Default: no)
push_errors : no
push_err_token: '{[PUSHOVER_ERR_TOKEN]}'
push_err_key: '{[PUSHOVER_USER_KEY]}'
#push_err_device: My-S21
push_error_sound:

# *** Only enable push_jpg and push_gif if you have create_animation: yes ***
# If you have animation enabled and want a push sent with the jpg (FAST notification), set this option up with the APP TOKEN for each type of notification.
# I have 2 different Pushover apps with 2 different APP TOKENS: one channel is for the jpegs, because those notifications are as fast as it gets,
# and another channel is for the GIF. You could also put the same app token in both push_jpg and push_gif and that pushover app will receive a jpg and then a gif.

#push_jpg: '{[PUSHOVER_JPG]}'
# Different user key if using groups; comment out to use the default user key -> push_key
#push_jpg_key: '{[PUSHOVER_JPG_KEY]}'

#push_gif: '{[PUSHOVER_GIF]}'
# Different user key if using groups; comment out to use the default user key -> push_key
#push_gif_key: '{[PUSHOVER_GIF_KEY]}'

# If debugging (using es.debug.objdet) a PAST event, then only send pushover notifications to this device
# (Default: None)
#push_debug_device : my-Note10

# If you do not use Home Assistant and want to control the cooldown between pushover notifications,
# set this to how many seconds. Use it in each monitor's section to set the cooldown per monitor.
# This will be the global setting that can be overridden in the per-monitor section (Default: None)

#push_cooldown: 120

###########################################
# ------ [ MQTT ADD-ON SECTION ] ------
###########################################
# use the python mqtt client to send alarm,snapshot,objdetect(.jpg/.gif) to the zmes/picture/<monitor id> topic (Default: no)
# useful if you set up an MQTT camera in Home Assistant.
# Options to use no encryption or secure/insecure TLS/mTLS.
# Default ports for TCP: non-TLS: 1883, TLS: 8883.
mqtt_enable: no

# Force mqtt to send pic and data even if it's a PAST event
mqtt_force: no

# Allows you to set a custom MQTT topic name; the format for topics is: name/sub-name/sub-sub-name
# notice no leading or trailing '/'
# python mqtt default topic: zmes
#mqtt_topic : myown_topic/here

# if using tls, remember about host verification (tls_insecure : no host verification but still encrypted)
#mqtt_broker : brokers.hostname

# Only use this if not using the standard tcp ports; it defaults to 1883 if no TLS and 8883 if TLS. This setting will override those.
#mqtt_port : 1234
# MQTT credentials if enabled in the broker
mqtt_user : '{[MQTT_USERNAME]}'
mqtt_pass : '{[MQTT_PASSWORD]}'

# MQTT over TLS
# Location of the MQTT broker CA certificate. Uncommenting this line will enable MQTT over TLS.
# Strict certificate checking (Default: no)
mqtt_tls_allow_self_signed : yes

# To allow insecure TLS - disables the peer verifier / doesn't verify the hostname in the COMMON NAME (CN: field) (Default: no).
# If using an ip address in the cert's COMMON NAME field then this needs to be 'yes'
mqtt_tls_insecure : yes

# CA certificate
# mTLS CA (self signed?)
tls_ca : /path_to/mqtt_certs/ca.crt
# TLS CA (LetsEncrypt?)
#tls_ca : /etc/certs/fullchain.pem

# Here is a good guide on setting up a CA and signing server/client certificates for MQTT. Even if you're using mqtt over your LAN only,
# it is always good to enable encryption and learn about it -> http://www.steves-internet-guide.com/creating-and-using-client-certificates-with-mqtt-and-mosquitto/
# I DO NOT RECOMMEND using the Home Assistant MQTT broker add-on as it's a nightmare to get TLS working. (I am still unable to get the MQTT integration to connect to my broker using TLS.)
# I run a mosquitto MQTT broker on my ZM host and hass connects to that over an unencrypted connection.
# To enable two-way TLS, add a client certificate and private key, meaning you had a CA sign your broker's server key/cert
# and also had the CA sign the client key/cert that you are using here.
# Location of the client certificate and private key:

#tls_cert : /path_to/mqtt_certs/client-zm.crt
#tls_key : /path_to/mqtt_certs/client-zm.key

###########################################
# ------ [ ANIMATION SECTION ] ------
###########################################

# This section gives you an option to get brief animations
# of the event, delivered as part of the push notification to mobile devices.
# Animations are created only if an object is detected.
#
# NOTE: This will DELAY the time taken to send you push notifications.
# It will try to first create the animation, which may take up to a minute
# depending on how soon it gets access to frames. See notes below.
# NOW INCLUDED: the first portion of frames is annotated with bounding boxes and labels, and a TIMESTAMP - MONITOR_NAME
# overlay is included in the top left corner. Animation creation is also now a THREADED background process, so sending notifications is faster.
# If yes, object detection will attempt to create
# a short GIF file around the object detection frame
# that can be sent via push notifications for instant playback. Pushover size limit: 2.5MB
# (Default: no)

# TO MAKE THINGS FAST! Make sure the monitors are saving their events as JPEG in 'storage'. If using mp4 video passthrough,
# detections and animations are WAY slower because ZM needs to build the MP4 and then send the frames from the MP4.
# JPEG storage will give us the frames in real time as ZM reads them. Try it out, the performance increase is astounding.
create_animation: no

# if an animation already exists, force writing a new one (Default: no)
#force_animation: yes

# place a timestamp on the animations - customizations are WIP (diff timestamp format, no monitor name)
animation_timestamp:
  enabled: no # Default: yes
  # Make sure to quote the timestamp string!
  date format: '%Y-%m-%d %H:%M:%S' # Default: '%Y-%m-%d %H:%M:%S'
  # Add the monitor name and monitor ID to the timestamp
  monitor id: yes # Default: yes
  # BGR not RGB
  text color: (255,255,255) # Default: (255,255,255) aka white
  # background is the solid colored rectangle that the timestamp text is printed on
  background: yes # Default: yes
  # BGR not RGB
  bg color: (0,0,0) # Default: (0,0,0) aka black

# Format of the animation burst.
# Valid options are "mp4", "gif", "mp4,gif".
# Note that gifs will be of a shorter duration
# as they take up much more disk space than mp4,
# BUT you can enable fast_gif
animation_types: 'mp4,gif'

# if animation_types includes gif, then we can generate a fast preview gif:
# every second frame is skipped and the frame rate doubled,
# so you end up with a longer timeframe sped up (Default: no)
fast_gif: no

# default width of the animation image. Be cautious when you increase this:
# most mobile platforms give a very brief amount of time (in seconds)
# to download the image.
# Given your ZM instance will be serving the image, it will be slow anyway;
# making the total animation size bigger can result in the notification not
# getting an image at all (timed out)
animation_width: 640

# When an event is detected, ZM writes frames a little late.
# On top of that, it looks like with caching enabled, the API layer doesn't
# get access to DB records for much longer (around 30 seconds), at least on my
# system. animation_retry_sleep refers to how long to wait before trying to grab
# frame information if it failed. animation_max_tries defines how many times it
# will try to retrieve frames before it gives up.
animation_retry_sleep: 3
animation_max_tries: 8

###########################################
# ------ [ MLAPI / REMOTE SECTION ] ------
###########################################
# You can now run the machine learning code on a different server.
# This frees up your ZM server for other things.
# To do this, you need to set up https://github.com/baudneo/mlapi
# on your desired server and configure it with a DB user (python3 mlapi_dbuser.py). See its instructions.

# This is the MASTER on/off switch for enabling communication with MLAPI; it must be yes/on/1/true to be enabled
ml_enable: yes

# If the mlapi routes all fail, you can still run the machine learning code locally
ml_fallback_local: no

# encrypted credentials setup
ml_routes:
  # weight of route, lower is more important.
  - weight: 0
    # NAME and KEY must be the same here and in the mlapi zmes_keys option for decryption to know what key to grab
    name: 'mlapi_one'
    # enabled: true # Adding support for this next push

    gateway: 'http://192.168.1.100:5002/api/v1'
    # The user to log in to mlapi with (one of the MLAPI DB users created using mlapi_dbuser.py)
    user: '{[ML_USER]}'
    pass: '{[ML_PASSWORD]}'
    # The key to use for encryption
    enc_key: '{[mlapi_one_key]}'
  # No longer need to pass credentials to mlapi; the JWT AUTH token is passed (encrypted) to it instead

  # - weight: 1
  #   name: some other host
  #   gateway: xxxxxxxx

###########################################
# ------ [ OBJECT MODEL SECTION ] ------
###########################################

object_detection_pattern: (person|car|motorbike|bus|truck|boat|dog|cat)
object_min_confidence: 0.6

# Google Coral
coral_models: "{{base_data_path}}/models/coral_edgetpu"
# Newer models (EfficientDet3x and tf2 mobilenet v2)
tpu_efficientdet_lite3: '{{coral_models}}/efficientdet_lite3_512_ptq_edgetpu.tflite'
tpu_tf2_mobilenetv2: '{{coral_models}}/tf2_ssd_mobilenet_v2_coco17_ptq_edgetpu.tflite'
# The mobiledet model came out in Nov 2020 and is supposed to be faster and more accurate, but YMMV
tpu_object_weights_mobiledet: '{{coral_models}}/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite'
tpu_object_weights_mobilenetv2: '{{coral_models}}/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite'

tpu_object_labels: '{{coral_models}}/coco_indexed.names'
tpu_object_framework: coral_edgetpu
tpu_object_processor: tpu
tpu_min_confidence: 0.6

yolo4_models: '{{base_data_path}}/models/yolov4'
# Yolo v4 on GPU (falls back to CPU if no GPU)
yolo4_object_weights: '{{yolo4_models}}/yolov4.weights'
yolo4_object_labels: '{{yolo4_models}}/coco.names'
yolo4_object_config: '{{yolo4_models}}/yolov4.cfg'
yolo4_object_framework: opencv
yolo4_object_processor: gpu
# use half precision floating point as the target backend for yolo; on newer cards this may decrease your inference time.
# Try without this enabled first a few times to get a baseline, then enable it to see if detections are faster.
# THIS SETTING ONLY APPLIES TO GPU ACCELERATED OPENCV.
# Read up on 'half precision floating point' and 'CUDA FP16 target'.
# ** NOTE THIS IS EXPERIMENTAL **
#fp16_target: no

# Yolo v3 on GPU (falls back to CPU if no GPU)
yolo3_object_weights: '{{base_data_path}}/models/yolov3/yolov3.weights'
yolo3_object_labels: '{{base_data_path}}/models/yolov3/coco.names'
yolo3_object_config: '{{base_data_path}}/models/yolov3/yolov3.cfg'
yolo3_object_framework: opencv
yolo3_object_processor: gpu

# Tiny Yolo v4 on GPU (falls back to CPU if no GPU)
tinyyolo_object_config: '{{base_data_path}}/models/tinyyolov4/yolov4-tiny.cfg'
tinyyolo_object_weights: '{{base_data_path}}/models/tinyyolov4/yolov4-tiny.weights'
tinyyolo_object_labels: '{{base_data_path}}/models/tinyyolov4/coco.names'
tinyyolo_object_framework: opencv
tinyyolo_object_processor: gpu

###########################################
# ------ [ FACE MODEL SECTION ] ------
###########################################
tpu_face_weights_mobilenetv2: '{{coral_models}}/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite'
face_detection_pattern: .*
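# e.g. only match specific trained face names (these names are hypothetical):
#face_detection_pattern: (mike|maryanne)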
# cpu or gpu; if a gpu isn't available it will default to cpu. You can also force CPU usage.
face_dlib_processor: gpu

face_detection_framework: dlib
face_recognition_framework: dlib
face_num_jitters: 0
face_upsample_times: 0
face_model: cnn
face_train_model: cnn
# 0.5 and lower: more strict; start @ 0.5 and test slowly (Default: 0.6)
face_recog_dist_threshold: 0.6
# I do not recommend changing the algo
face_recog_knn_algo: ball_tree
known_images_path: '{{base_data_path}}/known_faces'
unknown_images_path: '{{base_data_path}}/unknown_faces'
unknown_face_name: Unknown_Face
save_unknown_faces: no
save_unknown_faces_leeway_pixels: 100

###########################################
# ------ [ ALPR MODEL SECTION ] ------
###########################################
# regex pattern; specify plate numbers!
alpr_detection_pattern: .*
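# e.g. only match plates beginning with 'ABC' (hypothetical prefix):
#alpr_detection_pattern: ^ABC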

#-- Many of the ALPR providers offer both a cloud version
#-- and a local SDK version. Sometimes the local SDK format differs from
#-- the cloud instance. Set this to local or cloud. (Default: cloud)
# alpr_api_type: local

# -----| If you are using plate recognizer | ------
alpr_service: plate_recognizer

#-- If you want to host a local SDK https://app.platerecognizer.com/sdk/
#alpr_url: http://192.168.1.21:8080/alpr

#-- Plate recognizer: replace with your api key
alpr_key: '{[PLATEREC_ALPR_KEY]}'

#-- if yes, then it will log usage statistics of the ALPR service
platerec_stats: yes

#-- If you want to specify regions. See http://docs.platerecognizer.com/#regions-supported
platerec_regions: [ 'ca' ]

#-- minimal confidence for actually detecting a plate
platerec_min_dscore: 0.1

#-- minimal confidence for the translated text - called OCR in its docs
platerec_min_score: 0.2


# ----| If you are using the openALPR Cloud API |-----
#alpr_service: open_alpr
#alpr_key: '{[OPENALPR_ALPR_KEY]}'
#-- For an explanation of params, see http://doc.openalpr.com/api/?api=cloudapi
#openalpr_recognize_vehicle: 1
#openalpr_country: us
#openalpr_state: ca
#-- openalpr returns percents, but we convert to between 0 and 1
#openalpr_min_confidence: 0.3


# ----| If you are using the openALPR command line |-----
#alpr_service: open_alpr_cmdline
openalpr_cmdline_binary: alpr
#-- Do an alpr -help to see options, plug them in here,
#-- like say '-j -p ca -c US' etc.
#-- YOU MUST keep the -j; it outputs JSON for ZMES to parse.
#-- Note that alpr_pattern is honored;
#-- for the rest, just stuff them in the cmd line options
openalpr_cmdline_params: -j -d
openalpr_cmdline_min_confidence: 0.3
# *** Remember to play around with the openalpr SDK .conf files (you can set libgpu for the detector) and also have it
# distort/resize/blur the image x number of times until it finds a match


###########################################
# ------ [ PER MONITOR OVERRIDES SECTION ] ------
###########################################
# You can override ALMOST any parameter on a per-monitor basis; there are some illegal keys that would cause undesirable behaviour.

monitors:
  6942069:
    # Front Main Stream
    # TO IDENTIFY POLYGONS make sure they end with _polygonzone or _polygon_zone
    # 1080p polygon
    # front_yard_polygonzone: 0,427 1085,261 1075,200 1912,448 1912,1071 0,1079
    # 4K polygon
    front_yard_polygonzone: 0,877 2170,553 3822,1131 3822,2141 0,2159
    front_yard_zone_detection_pattern: (person|dog|cat)
    object_detection_pattern: (person|dog|cat)
    frame_set: snapshot,70,snapshot,140,210,alarm,280,350,430
    model_sequence: object
    # sometimes it detects a large 'person'; this should stop that.
    person_max_detection_size: 65%

    person_min_confidence: 0.4732
    #ignore_past_det_labels: ['dog' , 'cat']
    #match_past_detections: yes
    #past_det_max_diff_area: 10%
    #past_det_max_diff_area: 6784px
    #max_detection_size: 90%
    #car_past_det_max_diff_area: 45%
    #dog_min_confidence: 0.60
    #cat_min_confidence: 0.60
    #car_min_confidence: 0.60
    #truck_min_confidence: 0.60
    #person_contained_area: 44%

    # HA add-on sensors
    hass_notify: input_boolean.front_switch
    hass_cooldown: input_number.front_cooldown
    # Future addition for 'person'
    # hass_person:
    #   - giuseppe
    #   - vinchenzo
    # If you do not use HA to control the pushover sensors you can control the cooldown with this option
    # push_cooldown: 300
    # custom_push_script: '/home/me/mycoolscript.sh' # see gotify_zmes.sh for the ARGS that are passed from zmes

    # Pushover custom sounds for this monitor
    # DEFAULT SOUND
    push_sound: motion_frontyard
    # Sound when a person is in the detected objects.
    # If more than one detected object has a custom sound, there is a hierarchy: 'person' takes priority over all.
    push_sound_person: person_frontyard
    push_sound_car:
    push_sound_truck:
    push_sound_motorbike:
    push_sound_dog:
    push_sound_cat:

    # FUTURE DATA STRUCTURE FOR DEFINED ZONES AKA POLYGONS AKA ZONES
    defined zones:
      # specify the polygon points in clockwise order
      front_walkway:
        pattern: (person|dog|cat)
        polygon_hw: h,w # or w,h
        3840x2160: 0,877 2170,553 3822,1131 3822,2141 0,2159 # AKA 4K
        1920x1080: 0,427 1085,261 1075,200 1912,448 1912,1071 0,1079
        #1080x1920: 0,427 1085,261 1075,200 1912,448 1912,1071 0,1079 # depending on your polygon_hw
        1280x720:
        480:
        320:
      front_porch:
        1080:
        720:
        320:


###########################################
# ------ [ MACHINE LEARNING SEQUENCES SECTION ] ------
###########################################
# 'smart_fps_thresh' -> say you have a frame_set of 'alarm,snapshot,120,180,240,320,380,440,snapshot' and it is a LIVE
# event, and frame_set calls a frame ID that is 'out of bounds': i.e. the event frame buffer is only @ 275 and frame_set
# requested 320, an overage of 45 frames. If your fps is 10, that's 4.5 seconds over. If smart_fps_thresh is set to 8
# (4.5 seconds is inside the 'threshold'), it will wait around and attempt to keep grabbing the frame, up to 3 attempts,
# with a wait time calculated to total roughly the 8 seconds.
# The default action is to set the frame ID to the last available frame and process that frame instead. That is what
# will happen if the frame ID called is 8+ seconds' worth of frames away (FPS is calculated in the script).
# I think 4-8 is a good compromise between speed and being able to process a large spaced-out frame set like the example above.
smart_fps_thresh: 5

# if enabled, will not grab exclusive locks before inferring;
# locking seems to cause issues on some unique file systems
disable_locks: no
stream_sequence:
  # 'most_models' (object+face+alpr), 'most', 'most_unique', 'first' #TYLER ADD 'union' for multiple frame detections per event!
  frame_strategy: '{{frame_strategy}}'
  frame_set: '{{frame_set}}'
  # ANY of the delay options can be set as xx or xx.yy
  # contiguous attempts and sleep
  contig_frames_before_error: 2
  delay_between_attempts: 2.143256
  max_attempts: 3 # attempts per frame (this is a 'batch' for the above setting)
  # delay_between_frames: 0.4 # delay between every frame in frame_set
  # delay_between_snapshots takes precedence over delay_between_frames if there would be a delay from both
  delay_between_snapshots: 1 # between snapshot frames, so the previous frame has to be a snapshot and so does the current one
  smart_fps_thresh: '{{smart_fps_thresh}}'

  # save every frame that is sent to the detection models. If you are processing a video or are getting weird results,
  # turn this on and review the frames in the 'save_frames_dir' directory.
  # For the time being it is your responsibility to clean up the directory after you are done (script to do daily clean-ups coming)
  save_frames: 'no' # (Default: no)
  save_frames_dir: # (Default: /tmp) - directory to save the 'save_frames' to

  # When controlling a video file
  # start_frame: 1
  # frame_skip: 1
  # max_frames: 0

  # If it is an event, download the mp4 file for the event and process it instead of requesting frame by frame
  # from the API. *** NOTE: You must have the 'H264 Passthrough' Video Writer enabled in the monitor settings for this to work
  # pre_download: true
  # pre_download_dir: # (Default: /tmp) - directory to save the frames into

ml_sequence:
  general:
    model_sequence: '{{model_sequence}}'
    disable_locks: '{{disable_locks}}'
    match_past_detections: '{{match_past_detections}}'
    past_det_max_diff_area: '{{past_det_max_diff_area}}'
    # ignore_past_detection_labels: ['dog', 'cat']
    # when matching past detections, names in a group are treated the same.
    # Also adding <alias>_min_confidence and <alias>_past_det_max_diff_size,
    # example -> vehicles_min_confidence: 0.66
    aliases:
      vehicles: [ 'car', 'bus', 'truck', 'boat', 'motorcycle' ]
      plants: [ 'broccoli', 'pottedplant', 'potted_plant' ]
      animals: [ 'dog','cat','mouse','horse' ]
    # NOTE! per-label overrides go here in 'general'
    # person_min_confidence: '{{person_min_confidence}}'
    # car_min_confidence: '{{car_min_confidence}}'
    # dog_min_confidence: '{{dog_min_confidence}}'
    # person_contained_area: '{{person_contained_area}}'
    # car_contained_area: '{{car_contained_area}}'
    # dog_contained_area: '{{dog_contained_area}}'
    # person_past_det_max_diff_area: '{{person_past_det_max_diff_area}}'
    # car_past_det_max_diff_area: '{{car_past_det_max_diff_area}}'
    # dog_past_det_max_diff_area: '{{dog_past_det_max_diff_area}}'
    # car_max_detection_size: '{{car_max_detection_size}}'
    # dog_max_detection_size: '{{dog_max_detection_size}}'
    # person_max_detection_size: '{{person_max_detection_size}}'
  object:
    general:
      object_detection_pattern: '{{object_detection_pattern}}'
      # 'first', 'most', 'most_unique', ****** 'union'
      same_model_sequence_strategy: '{{same_model_sequence_strategy}}'
      # HAS to be inside object->general as it only applies to object detection
      contained_area: '{{contained_area}}'
    sequence:
      # First run on TPU with higher confidence
      - name: 'coral::SSD-Lite MobileDet 312x312'
        enabled: 'no'
        object_weights: '{{tpu_object_weights_mobiledet}}'
        object_labels: '{{tpu_object_labels}}'
        object_min_confidence: '{{tpu_min_confidence}}'
        object_framework: '{{tpu_object_framework}}'
        tpu_max_processes: '{{tpu_max_processes}}'
        tpu_max_lock_wait: '{{tpu_max_lock_wait}}'
        max_detection_size: '{{max_detection_size}}'
      # Second, try MobileNetv2 object detection to compare to MobileDet results
      - name: 'coral::MobileNETv2-SSD 300x300'
        enabled: 'no'
        object_weights: '{{tpu_object_weights_mobilenetv2}}'
        object_labels: '{{tpu_object_labels}}'
        object_min_confidence: '{{tpu_min_confidence}}'
        object_framework: '{{tpu_object_framework}}'
        tpu_max_processes: '{{tpu_max_processes}}'
        tpu_max_lock_wait: '{{tpu_max_lock_wait}}'
        max_detection_size: '{{max_detection_size}}'
        model_height: 300
        model_width: 300
      # New models
      - name: 'coral::MobileNETv2-SSD TensorFlow 2.0 300x300'
        enabled: 'yes'
        object_weights: '{{tpu_tf2_mobilenetv2}}'
        object_labels: '{{tpu_object_labels}}'
        object_min_confidence: '{{tpu_min_confidence}}'
        object_framework: '{{tpu_object_framework}}'
        tpu_max_processes: '{{tpu_max_processes}}'
        tpu_max_lock_wait: '{{tpu_max_lock_wait}}'
        max_detection_size: '{{max_detection_size}}'
        model_height: 300
        model_width: 300

      - name: 'coral::EfficientDet-Lite 3 512x512'
        enabled: 'yes'
        object_weights: '{{tpu_efficientdet_lite3}}'
        object_labels: '{{tpu_object_labels}}'
        object_min_confidence: '{{tpu_min_confidence}}'
        object_framework: '{{tpu_object_framework}}'
        tpu_max_processes: '{{tpu_max_processes}}'
        tpu_max_lock_wait: '{{tpu_max_lock_wait}}'
        max_detection_size: '{{max_detection_size}}'
        model_height: 512
        model_width: 512

      - name: 'DarkNet::v4 Pre-Trained'
        enabled: 'yes'
        object_config: '{{yolo4_object_config}}'
        object_weights: '{{yolo4_object_weights}}'
        object_labels: '{{yolo4_object_labels}}'
        object_min_confidence: '{{object_min_confidence}}'
        object_framework: '{{yolo4_object_framework}}'
        object_processor: '{{yolo4_object_processor}}'
        gpu_max_processes: '{{gpu_max_processes}}'
        gpu_max_lock_wait: '{{gpu_max_lock_wait}}'
        cpu_max_processes: '{{cpu_max_processes}}'
        cpu_max_lock_wait: '{{cpu_max_lock_wait}}'
        # only applies to GPU, default is FP32; *** EXPERIMENTAL ***
        # fp16_target: '{{fp16_target}}'
        # at the moment this is a global setting turned on just by setting it to yes
        show_models: '{{show_models}}'

      # AWS Rekognition object detection
      # More info: https://medium.com/@michael-ludvig/aws-rekognition-support-for-zoneminder-object-detection-40b71f926a80
      - name: 'AWS Rekognition (PAID)'
        enabled: 'no'
        object_framework: 'aws_rekognition'
        object_min_confidence: '0.7'
        # AWS region, unless configured otherwise, e.g. in ~www-data/.aws/config
        aws_region: 'us-east-1'
        # AWS credentials from /etc/zm/secrets.ini,
        # unless running on an EC2 instance with an instance IAM role (which is preferable)
        aws_access_key_id: '{[AWS_ACCESS_KEY_ID]}'
        aws_secret_access_key: '{[AWS_SECRET_ACCESS_KEY]}'
        # no other parameters are required

  alpr:
    general:
      # every frame you send is counted as an API hit if using the cloud API
      same_model_sequence_strategy: 'first'
      # pre_existing_labels: ['car', 'motorbike', 'bus', 'truck', 'boat']
      # can make it a reg-ex for certain license plate numbers
      alpr_detection_pattern: '{{alpr_detection_pattern}}'
    sequence:
      # Try openALPR locally first (tweak with per-camera openalpr.conf files, pre-warp and calibration, etc.)
      # Also remember masks for timestamps etc.; per-camera config files are powerful
      - name: 'openALPR Command Line'
        # enabled: 'no'
        alpr_service: 'open_alpr_cmdline'
        openalpr_cmdline_binary: '{{openalpr_cmdline_binary}}'
        openalpr_cmdline_params: '{{openalpr_cmdline_params}}'
        openalpr_cmdline_min_confidence: '{{openalpr_cmdline_min_confidence}}'
        max_size: '1600'

      - name: 'Platerecognizer Cloud Service'
        enabled: 'no'
        # pel_any means as long as there are any detections; pel_none means only if there are no detections yet
        # pre_existing_labels: 'pel_any'
        # pre_existing_labels: ['car', 'motorbike', 'bus', 'truck', 'boat']
        alpr_api_type: 'cloud'
        alpr_service: 'plate_recognizer'
        alpr_key: '{{alpr_key}}'
        platerec_stats: '{{platerec_stats}}'
        platerec_min_dscore: '{{platerec_min_dscore}}'
        platerec_min_score: '{{platerec_min_score}}'
        # max_size: '1600'
        platerec_payload:
          regions: [ 'ca' ]
          # camera_id: 12
        # platerec_config:
        #   region: 'strict'
        #   mode: 'fast'

  face:
    general:
      face_detection_pattern: '{{face_detection_pattern}}'
      # combine results below
      same_model_sequence_strategy: 'union'
    sequence:
      - name: 'Face Detection -> coral::MobileNETv2-SSD 320x320'
        enabled: 'no'
        face_detection_framework: 'tpu'
        face_weights: '{{tpu_face_weights_mobilenetv2}}'
        face_min_confidence: 0.3
        model_height: 320
        model_width: 320

      - name: 'DLib::Face Detection/Recognition'
        enabled: 'yes'
        # Force CPU detection if you have a GPU (before, dlib used the GPU if it was compiled with CUDA support, regardless)
        # face_dlib_processor: cpu

        # If you use TPU detection first, we can run this ONLY if the TPU detects a face first
        # pre_existing_labels: [ 'face' ]
        save_unknown_faces: '{{save_unknown_faces}}'
        save_unknown_faces_leeway_pixels: '{{save_unknown_faces_leeway_pixels}}'
        face_detection_framework: '{{face_detection_framework}}'
        known_images_path: '{{known_images_path}}'
        unknown_images_path: '{{unknown_images_path}}'
        face_model: '{{face_model}}'
        face_train_model: '{{face_train_model}}'
        face_recog_dist_threshold: '{{face_recog_dist_threshold}}'
        face_num_jitters: '{{face_num_jitters}}'
        face_upsample_times: '{{face_upsample_times}}'
        gpu_max_processes: '{{gpu_max_processes}}'
        gpu_max_lock_wait: '{{gpu_max_lock_wait}}'
        cpu_max_processes: '{{cpu_max_processes}}'
        cpu_max_lock_wait: '{{cpu_max_lock_wait}}'
        max_size: 800