import os
import cv2
from flask import Flask, jsonify, request, render_template, Response, redirect, url_for
from source.face_recognition import recognize_faces
from source.utils import draw_rectangles, read_image, prepare_image
from datetime import datetime
from time import gmtime, strftime, localtime

import pandas as pd

# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
from imutils import paths
import pickle
import time
import csv
from collections import defaultdict


app = Flask(__name__)
video = cv2.VideoCapture(0)

app.config.from_object('config')
UPLOAD_FOLDER = os.path.basename('uploads')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
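
# config.py (loaded via app.config.from_object above) is expected to define
# DETECTION_API_URL, which the /recognize and /upload routes below pass on to
# recognize_faces().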


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/realtime')
def realtime():
    return render_template('realtime.html')
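
# realtime.html is assumed to embed the live stream with something along the lines of
# <img src="{{ url_for('video_feed') }}">; the browser then holds the multipart
# response from /video_feed open and swaps in each JPEG frame as it arrives.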


def gen(video):

    # start each stream with an empty attendance-system.csv (only the header row is kept)
    cleaner = pd.read_csv('attendance-system.csv')
    cleaner.drop(cleaner.index, inplace=True)
    cleaner.to_csv('attendance-system.csv', index=False)

    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--detector", default="face_detection_model",
                    help="path to OpenCV's deep learning face detector")
    ap.add_argument("-m", "--embedding-model", default="models/openface_nn4.small2.v1.t7",
                    help="path to OpenCV's deep learning face embedding model")
    ap.add_argument("-r", "--recognizer", default="models/5c_cnn_recognizer.pickle",
                    help="path to model trained to recognize faces")
    ap.add_argument("-l", "--le", default="models/5c_cnn_labelencoder.pickle",
                    help="path to label encoder")
    ap.add_argument("-c", "--confidence", type=float, default=0.5,
                    help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # load our serialized face detector from disk
    print("[INFO] loading face detector...")
    protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
    modelPath = os.path.sep.join([args["detector"],
                                  "res10_300x300_ssd_iter_140000.caffemodel"])
    detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

    # load our serialized face embedding model from disk
    print("[INFO] loading face recognizer...")
    embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])

    # load the actual face recognition model along with the label encoder
    recognizer = pickle.loads(open(args["recognizer"], "rb").read())
    le = pickle.loads(open(args["le"], "rb").read())
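
    # Expected on-disk layout for the defaults above:
    #   face_detection_model/deploy.prototxt
    #   face_detection_model/res10_300x300_ssd_iter_140000.caffemodel
    #   models/openface_nn4.small2.v1.t7
    #   models/5c_cnn_recognizer.pickle
    #   models/5c_cnn_labelencoder.pickle
    #
    # A minimal sketch of how the two pickles could be produced, assuming they come
    # from scikit-learn (illustrative only; the training script is not part of this file,
    # and known_names / embeddings are placeholder variables):
    #
    #     from sklearn.preprocessing import LabelEncoder
    #     from sklearn.svm import SVC
    #     le = LabelEncoder()
    #     labels = le.fit_transform(known_names)
    #     recognizer = SVC(kernel="linear", probability=True).fit(embeddings, labels)
    #     pickle.dump(recognizer, open("models/5c_cnn_recognizer.pickle", "wb"))
    #     pickle.dump(le, open("models/5c_cnn_labelencoder.pickle", "wb"))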

    # the video stream is provided by the caller, so no camera warm-up is done here

    # start the FPS throughput estimator
    fps = FPS().start()
    faces_list = []
    proba_list = []
    proba = 0
    count = 0
    now = datetime.now()
    dictionaryin = {}
    dictionaryout = {}

    unknown_counter = 0

    # loop over frames from the video stream
    while True:
        # grab the frame from the threaded video stream
        success, image = video.read()

        frame = image
        # resize the frame to have a width of 600 pixels (while
        # maintaining the aspect ratio), and then grab the image
        # dimensions
        frame = imutils.resize(frame, width=600)
        (h, w) = frame.shape[:2]

        dt_string = now.strftime("%d/%m/%Y")
        hr_string = strftime("%H:%M:%S", localtime())

        # construct a blob from the image
        imageBlob = cv2.dnn.blobFromImage(
            cv2.resize(frame, (300, 300)), 1.0, (300, 300),
            (104.0, 177.0, 123.0), swapRB=False, crop=False)

        # apply OpenCV's deep learning-based face detector to localize
        # faces in the input image
        detector.setInput(imageBlob)
        detections = detector.forward()
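
        # detections has shape (1, 1, N, 7): for detection i, index 2 holds the
        # confidence score and indices 3:7 hold the bounding box corners, normalised
        # to [0, 1] relative to the input blob.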

        # loop over the detections
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections
            if confidence > args["confidence"]:
                # compute the (x, y)-coordinates of the bounding box for
                # the face
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # extract the face ROI
                face = frame[startY:endY, startX:endX]
                (fH, fW) = face.shape[:2]

                # ensure the face width and height are sufficiently large
                if fW < 20 or fH < 20:
                    continue

                # construct a blob for the face ROI, then pass the blob
                # through our face embedding model to obtain the 128-d
                # quantification of the face
                faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
                                                 (96, 96), (0, 0, 0), swapRB=True, crop=False)
                embedder.setInput(faceBlob)
                vec = embedder.forward()

                # perform classification to recognize the face
                preds = recognizer.predict_proba(vec)[0]
                j = np.argmax(preds)
                proba = preds[j]
                name = le.classes_[j]
                img_counter = 0

                # draw the bounding box of the face along with the
                # associated probability
                text = "{}: {:.2f}%".format(name, proba * 100)
                y = startY - 10 if startY - 10 > 10 else startY + 10
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              (0, 0, 255), 2)
                cv2.putText(frame, text, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

                # print(le.classes_)

                # only count sufficiently confident recognitions towards attendance
                if proba >= 0.70:
                    faces_list.append(name)
                    proba_list.append(proba)
                    count = count + 1

                # show a welcome banner for known people recognised with high confidence
                if name == "name1":
                    if proba >= 0.80:
                        cv2.putText(frame, "WELCOME name1!!!", (40, 60),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)

                if name == "name2":
                    if proba >= 0.80:
                        cv2.putText(frame, "WELCOME name2!!!", (40, 60),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)

                if name == "name3":
                    if proba >= 0.80:
                        cv2.putText(frame, "WELCOME name3!!!", (40, 60),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)

                if name == "name4":
                    if proba >= 0.80:
                        cv2.putText(frame, "WELCOME name4!!!", (40, 60),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)

                # save a numbered snapshot whenever an unknown face is seen with high confidence
                if name == "unknown":
                    if proba >= 0.80:
                        unknown_dir = "images/unknown"
                        unknowns_name = unknown_dir + os.sep + "unknown_" + str(unknown_counter) + ".jpg"
                        cv2.imwrite(unknowns_name, frame)
                        unknown_counter += 1
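
        # The four welcome branches above are equivalent to a single check; a minimal
        # sketch (illustrative only, not wired in):
        #
        #     if proba >= 0.80 and name in ("name1", "name2", "name3", "name4"):
        #         cv2.putText(frame, "WELCOME {}!!!".format(name), (40, 60),
        #                     cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)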

        # once 20 confident recognitions have been collected, turn them into attendance rows
        if count == 20:

            # group the probabilities by name and convert each name's total to a share of the sum
            d = defaultdict(list)
            for key, value in zip(faces_list, proba_list):
                d[key].append(value)
            occurence = dict(d)
            thisset = set(occurence)
            for x in thisset:
                occurance_individual = len(occurence[x])
                occurence[x] = sum(item for item in occurence[x])

            a = sum(occurence.values())

            for x in thisset:
                occurence[x] = occurence[x] / a

            # anyone holding at least 30% of the probability mass is marked present
            attendance = {word for word, prob in occurence.items() if prob >= 0.3}
            # students = max(occurence, key=occurence.get)
            students = list(attendance)

            headers = ['Date', 'Name', 'Time Sign In', 'Time Sign Out']

            def write_csv(data):
                # append a row, writing the header first if the file is still empty
                with open('attendance-system.csv', 'a') as outfile:
                    file_is_empty = os.stat('attendance-system.csv').st_size == 0
                    writer = csv.writer(outfile, lineterminator='\n')
                    if file_is_empty:
                        writer.writerow(headers)

                    writer.writerow(data)

            # time.sleep(1)
            current_hour = datetime.now().second
            fps.stop()
            waktu = fps.elapsed()

            # use the elapsed stream time to decide whether this is a sign-in or sign-out window
            if waktu >= 0 and waktu <= 15:
                print('Attendance system Open for sign in')
                for a in students:
                    write_csv([dt_string, a, hr_string, ''])

                records = pd.read_csv('attendance-system.csv')  # Records dictionaryin for notification
                deduped = records.drop_duplicates(['Name'], keep='first')
                deduped = deduped.drop(columns=['Time Sign Out'])
                dictionaryin = deduped.set_index('Name').T.to_dict('list')

            elif waktu >= 30 and waktu <= 45:

                for a in students:
                    write_csv([dt_string, a, '', hr_string])
                print('Attendance system Open for sign out')

                records = pd.read_csv('attendance-system.csv')  # Records dictionaryout for notification
                signed_out = records.loc[records['Time Sign Out'].notna()]
                deduped_out = signed_out.drop_duplicates(['Name'], keep='first')
                deduped_out = deduped_out.drop(columns=['Time Sign In'])
                dictionaryout = deduped_out.set_index('Name').T.to_dict('list')
            else:
                print('Attendance system closed until next course')

            print(dt_string, hr_string, students)

            faces_list.clear()
            proba_list.clear()
            count = 0
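
        # For reference, the attendance aggregation above condenses to a small pure
        # function; a minimal sketch (the helper name is illustrative and unused here):
        #
        #     def summarise_attendance(names, probas, share=0.3):
        #         totals = defaultdict(float)
        #         for n, p in zip(names, probas):
        #             totals[n] += p
        #         grand_total = sum(totals.values())
        #         return [n for n, s in totals.items() if s / grand_total >= share]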

        # update the FPS counter
        fps.update()

        # encode the annotated frame as JPEG and yield it as one part of the MJPEG stream
        ret, jpeg = cv2.imencode('.jpg', frame)
        frame = jpeg.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # stop the timer and display FPS information
        fps.stop()

        # rebuild the merged attendance report on every frame
        records = pd.read_csv('attendance-system.csv')
        deduped = records.drop_duplicates(['Name'], keep='first')
        deduped = deduped.drop(columns=['Time Sign Out'])

        signed_out = records.loc[records['Time Sign Out'].notna()]
        deduped_out = signed_out.drop_duplicates(['Name'], keep='first')
        deduped_out = deduped_out.drop(columns=['Time Sign In'])

        mergedStuff = pd.merge(deduped, deduped_out, on=['Name'], suffixes=(' Sign In', ' Sign Out'))
        attend_data = mergedStuff[mergedStuff.Name != 'unknown']
        attend_data.to_csv('attendance-data.csv', index=False)

        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # do a bit of cleanup


cv2.destroyAllWindows()
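
# Quick sanity check for the generator above (illustrative only; assumes a working
# camera at index 0 and the model files listed inside gen()):
#
#     chunk = next(gen(cv2.VideoCapture(0)))
#     assert chunk.startswith(b'--frame')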


@app.route('/video_feed')
def video_feed():
    global video
    return Response(gen(video),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/recognize', methods=['POST'])
def detect():
    file = request.files['image']

    # Read image
    image = read_image(file)

    # Recognize faces
    classifier_model_path = "models" + os.sep + "4c_recognizer.pickle"
    label_encoder_path = "models" + os.sep + "4c_labelencoder.pickle"
    faces = recognize_faces(image, classifier_model_path, label_encoder_path,
                            detection_api_url=app.config["DETECTION_API_URL"])

    return jsonify(recognitions=faces)
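
# Illustrative client call for /recognize (the file field name "image" matches
# request.files['image'] above; the exact shape of each recognition entry depends
# on recognize_faces()):
#
#     import requests
#     with open("some_face.jpg", "rb") as fh:
#         resp = requests.post("http://localhost:5000/recognize", files={"image": fh})
#     print(resp.json())   # {"recognitions": [...]}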

@app.route('/upload', methods=['POST'])
def upload():
    file = request.files['image']

    # Read image
    image = read_image(file)

    # Recognize faces
    classifier_model_path = "models" + os.sep + "4c_recognizer.pickle"
    label_encoder_path = "models" + os.sep + "4c_labelencoder.pickle"
    faces = recognize_faces(image, classifier_model_path, label_encoder_path,
                            detection_api_url=app.config["DETECTION_API_URL"])

    # Draw detection rects
    draw_rectangles(image, faces)

    # Prepare image for html
    to_send = prepare_image(image)

    return render_template('stillphoto.html', face_recognized=len(faces) > 0,
                           num_faces=len(faces), image_to_show=to_send, init=True)

@app.route('/static')
def static_page():
    with app.app_context():
        return render_template('stillphoto.html')


@app.route('/admin')
def admin():
    with app.app_context():
        return render_template('admin.html')


@app.route('/employee')
def employee():
    with app.app_context():
        return render_template('employee.html')


@app.route('/login')
def login():
    return render_template('login.html')
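
# gene() mirrors gen() above, but instead of drawing welcome banners it attempts to
# return a role-specific page (admin.html / employee.html) once a face is recognised
# with enough confidence; it backs the /login_feed stream defined further below.
# Note that a `return` inside a generator only ends the stream, so the rendered
# template is never actually sent to the client.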


def gene(video):
    # start each stream with an empty attendance-system.csv (only the header row is kept)
    cleaner = pd.read_csv('attendance-system.csv')
    cleaner.drop(cleaner.index, inplace=True)
    cleaner.to_csv('attendance-system.csv', index=False)

    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--detector", default="face_detection_model",
                    help="path to OpenCV's deep learning face detector")
    ap.add_argument("-m", "--embedding-model", default="models/openface_nn4.small2.v1.t7",
                    help="path to OpenCV's deep learning face embedding model")
    ap.add_argument("-r", "--recognizer", default="models/5c_cnn_recognizer.pickle",
                    help="path to model trained to recognize faces")
    ap.add_argument("-l", "--le", default="models/5c_cnn_labelencoder.pickle",
                    help="path to label encoder")
    ap.add_argument("-c", "--confidence", type=float, default=0.5,
                    help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # load our serialized face detector from disk
    print("[INFO] loading face detector...")
    protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
    modelPath = os.path.sep.join([args["detector"],
                                  "res10_300x300_ssd_iter_140000.caffemodel"])
    detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

    # load our serialized face embedding model from disk
    print("[INFO] loading face recognizer...")
    embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])

    # load the actual face recognition model along with the label encoder
    recognizer = pickle.loads(open(args["recognizer"], "rb").read())
    le = pickle.loads(open(args["le"], "rb").read())

    # start the FPS throughput estimator
    fps = FPS().start()
    faces_list = []
    proba_list = []
    proba = 0
    count = 0
    now = datetime.now()
    dictionaryin = {}
    dictionaryout = {}

    unknown_counter = 0

    # loop over frames from the video stream
    while True:
        # grab the frame from the threaded video stream
        success, image = video.read()

        frame = image
        # resize the frame to have a width of 600 pixels (while
        # maintaining the aspect ratio), and then grab the image
        # dimensions
        frame = imutils.resize(frame, width=600)
        (h, w) = frame.shape[:2]

        dt_string = now.strftime("%d/%m/%Y")
        hr_string = strftime("%H:%M:%S", localtime())

        # construct a blob from the image
        imageBlob = cv2.dnn.blobFromImage(
            cv2.resize(frame, (300, 300)), 1.0, (300, 300),
            (104.0, 177.0, 123.0), swapRB=False, crop=False)

        # apply OpenCV's deep learning-based face detector to localize
        # faces in the input image
        detector.setInput(imageBlob)
        detections = detector.forward()

        # loop over the detections
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections
            if confidence > args["confidence"]:
                # compute the (x, y)-coordinates of the bounding box for
                # the face
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # extract the face ROI
                face = frame[startY:endY, startX:endX]
                (fH, fW) = face.shape[:2]

                # ensure the face width and height are sufficiently large
                if fW < 20 or fH < 20:
                    continue

                # construct a blob for the face ROI, then pass the blob
                # through our face embedding model to obtain the 128-d
                # quantification of the face
                faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
                                                 (96, 96), (0, 0, 0), swapRB=True, crop=False)
                embedder.setInput(faceBlob)
                vec = embedder.forward()

                # perform classification to recognize the face
                preds = recognizer.predict_proba(vec)[0]
                j = np.argmax(preds)
                proba = preds[j]
                name = le.classes_[j]
                img_counter = 0

                # draw the bounding box of the face along with the
                # associated probability
                text = "{}: {:.2f}%".format(name, proba * 100)
                y = startY - 10 if startY - 10 > 10 else startY + 10
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              (0, 0, 255), 2)
                cv2.putText(frame, text, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

                # print(le.classes_)

                # only count sufficiently confident recognitions towards attendance
                if proba >= 0.70:
                    faces_list.append(name)
                    proba_list.append(proba)
                    count = count + 1

                # once a known person is recognised with enough confidence, try to
                # return a role-specific page (see the note above gene())
                if name == "name1":
                    if proba >= 0.80:
                        name = "name1"
                        with app.app_context():
                            return render_template('admin.html', value=name)

                if name == "name2":
                    if proba >= 0.40:
                        name = "name2"
                        with app.app_context():
                            return render_template('employee.html', value=name)

                if name == "name3":
                    if proba >= 0.40:
                        with app.app_context():
                            return render_template('employee.html', value=name)

                if name == "name4":
                    if proba >= 0.40:
                        with app.app_context():
                            return render_template('employee.html', value=name)

                # save a numbered snapshot whenever an unknown face is seen with high confidence
                if name == "unknown":
                    if proba >= 0.80:
                        unknown_dir = "images/unknown"
                        unknowns_name = unknown_dir + os.sep + "unknown_" + str(unknown_counter) + ".jpg"
                        cv2.imwrite(unknowns_name, frame)
                        unknown_counter += 1

        # once 20 confident recognitions have been collected, turn them into attendance rows
        if count == 20:

            # group the probabilities by name and convert each name's total to a share of the sum
            d = defaultdict(list)
            for key, value in zip(faces_list, proba_list):
                d[key].append(value)
            occurence = dict(d)
            thisset = set(occurence)
            for x in thisset:
                occurance_individual = len(occurence[x])
                occurence[x] = sum(item for item in occurence[x])

            a = sum(occurence.values())

            for x in thisset:
                occurence[x] = occurence[x] / a

            # anyone holding at least 30% of the probability mass is marked present
            attendance = {word for word, prob in occurence.items() if prob >= 0.3}
            # students = max(occurence, key=occurence.get)
            students = list(attendance)

            headers = ['Date', 'Name', 'Time Sign In', 'Time Sign Out']

            def write_csv(data):
                # append a row, writing the header first if the file is still empty
                with open('attendance-system.csv', 'a') as outfile:
                    file_is_empty = os.stat('attendance-system.csv').st_size == 0
                    writer = csv.writer(outfile, lineterminator='\n')
                    if file_is_empty:
                        writer.writerow(headers)

                    writer.writerow(data)

            # time.sleep(1)
            current_hour = datetime.now().second
            fps.stop()
            waktu = fps.elapsed()

            # use the elapsed stream time to decide whether this is a sign-in or sign-out window
            if waktu >= 0 and waktu <= 15:
                print('Attendance system Open for sign in')
                for a in students:
                    write_csv([dt_string, a, hr_string, ''])

                records = pd.read_csv('attendance-system.csv')  # Records dictionaryin for notification
                deduped = records.drop_duplicates(['Name'], keep='first')
                deduped = deduped.drop(columns=['Time Sign Out'])
                dictionaryin = deduped.set_index('Name').T.to_dict('list')

            elif waktu >= 30 and waktu <= 45:

                for a in students:
                    write_csv([dt_string, a, '', hr_string])
                print('Attendance system Open for sign out')

                records = pd.read_csv('attendance-system.csv')  # Records dictionaryout for notification
                signed_out = records.loc[records['Time Sign Out'].notna()]
                deduped_out = signed_out.drop_duplicates(['Name'], keep='first')
                deduped_out = deduped_out.drop(columns=['Time Sign In'])
                dictionaryout = deduped_out.set_index('Name').T.to_dict('list')
            else:
                print('Attendance system closed until next course')

            print(dt_string, hr_string, students)

            faces_list.clear()
            proba_list.clear()
            count = 0

        # update the FPS counter
        fps.update()

        # encode the annotated frame as JPEG and yield it as one part of the MJPEG stream
        ret, jpeg = cv2.imencode('.jpg', frame)
        frame = jpeg.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # stop the timer and display FPS information
        fps.stop()

        # rebuild the merged attendance report on every frame
        records = pd.read_csv('attendance-system.csv')
        deduped = records.drop_duplicates(['Name'], keep='first')
        deduped = deduped.drop(columns=['Time Sign Out'])

        signed_out = records.loc[records['Time Sign Out'].notna()]
        deduped_out = signed_out.drop_duplicates(['Name'], keep='first')
        deduped_out = deduped_out.drop(columns=['Time Sign In'])

        mergedStuff = pd.merge(deduped, deduped_out, on=['Name'], suffixes=(' Sign In', ' Sign Out'))
        attend_data = mergedStuff[mergedStuff.Name != 'unknown']
        attend_data.to_csv('attendance-data.csv', index=False)

        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # do a bit of cleanup


@app.route('/login_feed')
def login_feed():
    global video
    return Response(gene(video),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


cv2.destroyAllWindows()


@app.route('/profile/<username>')
def profile(username):
    return "welcome to profile page %s" % username


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, threaded=True)
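
# When this file is run directly, the app listens on 0.0.0.0:5000; threaded=True keeps
# the long-lived MJPEG generators from blocking the other routes.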