Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import cv2
import numpy as np
from keras.models import load_model
import tensorflow as tf
import keras.backend as K

# Let TensorFlow grow GPU memory on demand instead of claiming all of it
# at session creation.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

# Haar-cascade face detector and the trained emotion classifier.
# Raw strings so the Windows backslashes can never be interpreted as
# escape sequences (a path segment starting with t/n/r would break).
cascade = cv2.CascadeClassifier(r'D:\Code\haarcascade_frontalface_default.xml')
modelPath = r'D:\Code\model 32 7 sigmoid 0.3 0.005.hdf5'
model = load_model(modelPath)
# Pre-build the predict function so model.predict is safe to call from
# request-handling threads.
model._make_predict_function()
class vid():
    """Webcam capture that detects faces and classifies their emotion.

    Relies on the module-level `cascade` (Haar face detector) and
    `model` (Keras classifier). `evalPredict` holds the most recent
    prediction vector — presumably [anger, happy, sad], matching the
    consumers in the Flask app; TODO confirm against training labels.
    """

    def __init__(self):
        # -1 lets OpenCV pick the first available capture device.
        self.cam = cv2.VideoCapture(-1)
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
        # Zeros until the first face has been evaluated.
        self.evalPredict = [0, 0, 0]

    def __del__(self):
        # Release the camera handle when the object is collected.
        self.cam.release()

    def getFrame(self):
        """Grab one frame, box detected faces, refresh the emotion
        prediction, and return the annotated frame as JPEG bytes.

        Returns b'' if the camera read fails (previously the success
        flag was ignored and a failed grab crashed in cv2.flip).
        """
        ok, img = self.cam.read()
        if not ok or img is None:
            return b''
        img = cv2.flip(img, 1)  # mirror for a natural selfie view
        self.gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        self.face = cascade.detectMultiScale(
            self.gray, scaleFactor=1.5, minNeighbors=5)
        for (x, y, w, h) in self.face:
            # Grayscale region of interest fed to the classifier.
            self.roi = self.gray[y:y + h, x:x + w]
            self.color = (255, 255, 0)
            self.stroke = 3
            self.endX = x + w
            self.endY = y + h
            img = cv2.rectangle(
                img, (x, y), (self.endX, self.endY), self.color, self.stroke)
            self.eval = self.evaluate(self.roi)
            self.evalPredict = self.eval
        ret, jpeg = cv2.imencode('.jpeg', img)
        return jpeg.tobytes()

    def evaluate(self, img):
        """Resize a grayscale face ROI to 128x128 and return the
        model's prediction vector for it."""
        img = cv2.resize(img, (128, 128))
        img = np.array(img)
        self.predict = self.prediction(img)
        # model.predict returns a batch; keep the single row.
        self.predict = self.predict[0]
        return self.predict

    def prediction(self, roi):
        # The model expects a (batch, 128, 128, 1) grayscale tensor.
        self.predict = model.predict(roi.reshape(1, 128, 128, 1))
        return self.predict
from flask import Flask, render_template, Response
from cam import vid
from flask_socketio import SocketIO, send,emit
import eventlet

# Green-thread patch of the stdlib; must run before sockets/threads are
# used elsewhere.
# NOTE(review): monkey_patching eventlet while also passing
# async_mode='threading' below is contradictory — confirm which async
# model is actually intended.
eventlet.monkey_patch()

app = Flask(__name__)
app.config['SECRET_KEY'] ='secret'
# NOTE(review): 'message' is not a documented SocketIO() parameter —
# presumably cors_allowed_origins was intended; verify against the
# Flask-SocketIO docs. Port 500 (vs Flask's default 5000) also looks
# like a typo.
socketio = SocketIO(app,message = 'http://127.0.0.1:500',async_mode='threading')
@app.route('/')
def index():
    """Serve the main page with the video stream and live anger score."""
    page = render_template('index.html')
    return page
def gen(cam):
    """Yield multipart JPEG chunks from `cam` for an MJPEG stream.

    After each frame is yielded, pushes the current anger score (first
    element of cam.evalPredict — assumed [anger, happy, sad], TODO
    confirm) to connected socket clients via handleMessage.
    """
    while True:
        frame = cam.getFrame()
        anger = cam.evalPredict[0]
        # The multipart boundary requires CRLF separators; the previous
        # literals had lost their backslashes ('rn' instead of '\r\n'),
        # which produces an invalid multipart/x-mixed-replace stream.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
        handleMessage(str(anger))
@app.route('/video')
def video():
    """Stream the webcam as a multipart (MJPEG) HTTP response."""
    stream = gen(vid())
    mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return Response(stream, mimetype=mimetype)
@socketio.on('message')
def handleMessage(anger):
    # Registered as the Socket.IO 'message' handler but also called
    # directly from gen(); the fabricated request context lets emit()
    # work outside a real Socket.IO event.
    # Broadcasts the anger score string to every client on the '/anger'
    # namespace (consumed by the 'Anger Response' listener in the page).
    with app.test_request_context('/'):
        emit('Anger Response',anger,namespace='/anger',broadcast=True)
if __name__ == '__main__':
    # Start via Socket.IO's server wrapper rather than app.run() so the
    # websocket transport is handled.
    socketio.run(app, debug=True)
<!DOCTYPE html>
<html lang="en" dir="ltr">
  <head>
    <meta charset="utf-8">
    <title>MICREX</title>
    <!-- Socket.IO client: receives live emotion updates from the Flask-SocketIO server -->
    <script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.2.0/socket.io.dev.js" charset="utf-8"></script>
    <!-- jQuery, used only for the DOM updates below -->
    <script
      src="https://code.jquery.com/jquery-3.3.1.js"
      integrity="sha256-2Kok7MbOyxpgUVvAk/HJ2jigOSYS2auK4Pfzbm7uH60="
      crossorigin="anonymous"></script>
  </head>
  <body>
    <script type="text/javascript">
      $(document).ready(function(){
        // Connect to the '/anger' namespace the server emits on.
        var socket = io.connect('http://'+document.domain+':'+location.port+'/anger');
        //socket.on('message',function(a){
        // Replace the #h2 text with each broadcast anger score.
        socket.on('Anger Response',function(a){
          $("#h2").html(a);
          console.log(a)
        });
        //});
        $('#h2').html('Working');
      });
    </script>
    <h1>Video Stream</h1>
    <!-- MJPEG stream served by the /video Flask route -->
    <img src="{{ url_for('video') }}" alt="">
    <h2 id="h2">ABC</h2>
Add Comment
Please, Sign In to add comment