Advertisement
safwan092

Project_ML_RPi_Code

May 11th, 2023
29
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 6.49 KB | None | 0 0
  1. #!/usr/bin/python3
  2. import subprocess
  3. import numpy as np
  4. import re
  5. import cv2
  6. from tflite_runtime.interpreter import Interpreter
  7. import os
  8. import time
  9. import sys
  10. import math
  11. import glob
  12. import signal
  13. from datetime import datetime,timedelta
#import pygame
from pygame import mixer
#pygame.mixer.init()
# Initialise the pygame audio mixer once at import time; alert() below
# plays clips through mixer.music and requires this to have run first.
mixer.init()
  18.  
  19. def alert(audio, timer):
  20. mixer.music.load(audio)
  21. mixer.music.set_volume(0.7)
  22. mixer.music.play()
  23. #pygame.mixer.Sound(audio).play()
  24. time.sleep(timer)
  25. #pygame.mixer.Sound(audio).stop()
  26. #mixer.music.stop()
  27. time.sleep(0.5)
  28. print("Audio File Finished Playing")
  29.  
  30. check = 1
  31.  
  32. # clear ram
  33. pics = glob.glob('/run/shm/test*.jpg')
  34. for t in range(0,len(pics)):
  35. os.remove(pics[t])
  36.  
  37. def Camera_start(wx,hx):
  38. global p
  39. rpistr = "libcamera-vid -t 0 --segment 1 --codec mjpeg -n -o /run/shm/test%06d.jpg --width " + str(wx) + " --height " + str(hx)
  40. p = subprocess.Popen(rpistr, shell=True, preexec_fn=os.setsid)
  41.  
  42. #initialise variables
  43. width = 640 #720
  44. height = 480 #540
  45. CAMERA_WIDTH = 640
  46. CAMERA_HEIGHT = 480
  47. #face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
  48. #eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
  49. #open_eyes_detected = 0
  50. #face_detected = 0
  51. start = 0
  52. cv2.namedWindow('Frame')
  53. Text = "Left Mouse click on picture to EXIT, Right Mouse click for eye detaction ON/OFF"
  54. ttrat = time.time()
  55.  
  56. #define mouse clicks (LEFT to EXIT, RIGHT to switch eye detcetion ON/OFF)
  57. def mouse_action(event, x, y, flags, param):
  58. global p,check
  59. if event == cv2.EVENT_LBUTTONDOWN:
  60. os.killpg(p.pid, signal.SIGTERM)
  61. cv2.destroyAllWindows()
  62. sys.exit()
  63. if event == cv2.EVENT_RBUTTONDOWN:
  64. if check == 0:
  65. check = 1
  66. else:
  67. check = 0
  68.  
# Route mouse clicks on the preview window to the handler above.
cv2.setMouseCallback('Frame',mouse_action)

# start capturing images
Camera_start(width,height)
  73.  
  74. def load_labels(path='labels.txt'):
  75. """Loads the labels file. Supports files with or without index numbers."""
  76. with open(path, 'r', encoding='utf-8') as f:
  77. lines = f.readlines()
  78. labels = {}
  79. for row_number, content in enumerate(lines):
  80. pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
  81. if len(pair) == 2 and pair[0].strip().isdigit():
  82. labels[int(pair[0])] = pair[1].strip()
  83. else:
  84. labels[row_number] = pair[0].strip()
  85. return labels
  86.  
def set_input_tensor(interpreter, image):
    """Sets the input tensor."""
    # Index of the model's (single) input tensor.
    tensor_index = interpreter.get_input_details()[0]['index']
    # Writable view into the input buffer; [0] selects the batch slot.
    input_tensor = interpreter.tensor(tensor_index)()[0]
    # NOTE(review): (image-255)/255 maps uint8 pixels [0,255] to [-1,0].
    # The common float normalizations are image/255 -> [0,1] or
    # (image-127.5)/127.5 -> [-1,1]; confirm against the model's training
    # preprocessing before changing this.
    input_tensor[:, :] = np.expand_dims((image-255)/255, axis=0)
  92.  
  93.  
  94. def get_output_tensor(interpreter, index):
  95. """Returns the output tensor at the given index."""
  96. output_details = interpreter.get_output_details()[index]
  97. tensor = np.squeeze(interpreter.get_tensor(output_details['index']))
  98. return tensor
  99.  
  100.  
  101. def detect_objects(interpreter, image, threshold):
  102. """Returns a list of detection results, each a dictionary of object info."""
  103. set_input_tensor(interpreter, image)
  104. interpreter.invoke()
  105. # Get all output details
  106. boxes = get_output_tensor(interpreter, 1)
  107. classes = get_output_tensor(interpreter, 3)
  108. scores = get_output_tensor(interpreter, 0)
  109. count = int(get_output_tensor(interpreter, 2))
  110.  
  111. results = []
  112. for i in range(count):
  113. if scores[i] >= threshold:
  114. result = {
  115. 'bounding_box': boxes[i],
  116. 'class_id': classes[i],
  117. 'score': scores[i]
  118. }
  119. results.append(result)
  120. return results
  121.  
  122. def main():
  123. labels = load_labels()
  124. interpreter = Interpreter('detect.tflite')
  125. interpreter.allocate_tensors()
  126. _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']
  127.  
  128. #cap = cv2.VideoCapture(0)
  129. while True:
  130.  
  131. # remove message after 3 seconds
  132. #if time.time() - ttrat > 3 and ttrat > 0:
  133. # Text =""
  134. # ttrat = 0
  135.  
  136. # load image
  137. pics = glob.glob('/run/shm/test*.jpg')
  138. while len(pics) < 2:
  139. pics = glob.glob('/run/shm/test*.jpg')
  140. pics.sort(reverse=True)
  141. imgf = cv2.imread(pics[1])
  142. if len(pics) > 2:
  143. for tt in range(2,len(pics)):
  144. os.remove(pics[tt])
  145.  
  146. #ret, frame = cap.read()
  147. img = cv2.resize(cv2.cvtColor(imgf, cv2.COLOR_BGR2RGB), (320,320))
  148. res = detect_objects(interpreter, img, 0.7)
  149. print(res)
  150.  
  151.  
  152. for result in res:
  153. ymin, xmin, ymax, xmax = result['bounding_box']
  154. xmin = int(max(1,xmin * CAMERA_WIDTH))
  155. xmax = int(min(CAMERA_WIDTH, xmax * CAMERA_WIDTH))
  156. ymin = int(max(1, ymin * CAMERA_HEIGHT))
  157. ymax = int(min(CAMERA_HEIGHT, ymax * CAMERA_HEIGHT))
  158.  
  159. cv2.rectangle(imgf,(xmin, ymin),(xmax, ymax),(0,255,0),3)
  160. cv2.putText(imgf,labels[int(result['class_id'])],(xmin, min(ymax, CAMERA_HEIGHT-20)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),2,cv2.LINE_AA)
  161.  
  162. # Website to Convert from Whatsapp voice to [mp3] - https://convertio.co/ogg-mp3/
  163. if labels[int(result['class_id'])] == 'goodjob':
  164. print('play SOUND Good Job')
  165. alert("goodjob.mp3",3)
  166. if labels[int(result['class_id'])] == 'goodlife':
  167. print('play SOUND Good Life')
  168. alert("goodlife.mp3",3)
  169. if labels[int(result['class_id'])] == 'hello':
  170. print('play SOUND HELLO')
  171. alert("hello.mp3",3)
  172. if labels[int(result['class_id'])] == 'iloveyou':
  173. print('play SOUND I Love You')
  174. alert("iloveyou.mp3",3)
  175. if labels[int(result['class_id'])] == 'notsure':
  176. print('play SOUND Not Sure')
  177. alert("notsure.mp3",3)
  178. if labels[int(result['class_id'])] == 'ok':
  179. print('play SOUND OK')
  180. alert("ok.mp3",3)
  181. if labels[int(result['class_id'])] == 'thisiscool':
  182. print('play SOUND This Is Cool')
  183. alert("thisiscool.mp3",3)
  184. #cv2.imshow('Pi Feed', frame)
  185. #display image
  186. cv2.putText(img,Text, (10, height - 10), 0, 0.4, (0, 255, 255))
  187. cv2.imshow('Frame',imgf)
  188. cv2.waitKey(10)
  189.  
  190. if cv2.waitKey(10) & 0xFF ==ord('q'):
  191. cap.release()
  192. cv2.destroyAllWindows()
  193.  
  194. if __name__ == "__main__":
  195. main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement