Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- from __future__ import print_function
- import numpy as np
- import os
- import sys
- import tensorflow as tf
- import argparse
- import cv2
- import time
- from object_detection.utils import visualization_utils as vis_util
- slim = tf.contrib.slim
def GetAllFilesListRecusive(path, extensions):
    """Recursively collect files under *path* whose names end with one of *extensions*.

    Args:
        path: root directory to walk.
        extensions: iterable of suffixes (e.g. ['.jpg', '.png']).

    Returns:
        List of full file paths matching one of the suffixes.
    """
    # Suffix match (endswith) instead of the old substring match, which
    # wrongly accepted names like 'a.jpg.bak' for extension '.jpg'.
    suffixes = tuple(extensions)
    files_all = []
    for root, _subdirs, files in os.walk(path):
        for name in files:
            # Linux/KDE trick: '.directory' metadata entries still show up
            # as files, so skip anything containing 'directory'.
            if 'directory' in name:
                continue
            if name.endswith(suffixes):
                files_all.append(os.path.join(root, name))
    return files_all
# Default directory holding TF checkpoints (used as --snapshots-dir default).
snapshot_dir = './snapshots/'
# Default directory for script output (used as --save-dir default).
SAVE_DIR = './output/'
def calculate_perfomance(sess, input, output, shape, runs = 1000, batch_size = 1):
    """Print the average inference time of *output* over *runs* forward passes.

    A fresh random batch of shape (batch_size, shape[0], shape[1], 3) is fed
    each run; the estimated cost of generating that batch with numpy is
    subtracted from the reported per-run time.
    """
    probe_start = time.time()
    print('Calculating inference time on size', shape)
    # Estimate how long numpy takes to synthesize one input batch,
    # so array generation can be excluded from the timing below.
    probe_iters = 100
    for _ in range(probe_iters):
        batch = np.random.random((batch_size, shape[0], shape[1], 3))
    time_for_generate = (time.time() - probe_start) / probe_iters
    # One warmup pass so graph/session setup cost is not measured.
    sess.run([output], feed_dict={input: batch})
    timed_start = time.time()
    for _ in range(runs):
        batch = np.random.random((batch_size, shape[0], shape[1], 3))
        sess.run([output], feed_dict={input: batch})
    elapsed = time.time() - timed_start
    inf_time = elapsed / float(runs) - time_for_generate
    print('Average inference time: {}'.format(inf_time))
def get_arguments():
    """Define and parse the command-line interface of this script.

    Returns:
        argparse.Namespace with all options below.
    """
    parser = argparse.ArgumentParser(description="Object Detection Inference")
    add = parser.add_argument
    add("--img-path", type=str, default='./input', required=False,
        help="Path to the RGB image file.")
    add("--save-dir", type=str, default=SAVE_DIR,
        help="Path to save output.")
    add("--snapshots-dir", type=str, default=snapshot_dir,
        help="Path to checkpoints.")
    add("--pb-file", type=str, default='',
        help="Path to to pb file, alternative for checkpoint. If set, checkpoints will be ignored")
    add("--weighted", action="store_true", default=False,
        help="If true, will output weighted images")
    add("--batch-size", type=int, default=1,
        help="Size of batch for time measure")
    add("--measure-time", action="store_true", default=False,
        help="Evaluate only model inference time")
    add("--runs", type=int, default=100,
        help="Repeats for time measure. More runs - longer testing - more precise results")
    add("--with_score", action="store_true", default=False,
        help="If true will try to calculate score basing on dirs as classes")
    return parser.parse_args()
def save(saver, sess, logdir, step):
    """Write a checkpoint for *sess* into *logdir* (created if missing).

    The checkpoint file is named 'model.ckpt' and tagged with *step*.
    """
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    saver.save(sess, os.path.join(logdir, 'model.ckpt'), global_step=step)
    print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
    """Restore model parameters into *sess* from the checkpoint at *ckpt_path*."""
    saver.restore(sess, ckpt_path)
    print("Restored model parameters from {}".format(ckpt_path))
def load_img(img_path, h, w):
    """Load an image as RGB, optionally resize to (h, w), and scale to [-1, 1].

    Args:
        img_path: path to the image file.
        h, w: target height/width; skipped when either is falsy.

    Returns:
        (img, filename) where img is a float array in [-1, 1] and filename
        is the base name of *img_path*.

    Exits the process with a nonzero status when the file does not exist.
    """
    if os.path.isfile(img_path):
        print('successful load img: {0}'.format(img_path))
    else:
        print('not found file: {0}'.format(img_path))
        # Bug fix: exit(0) signaled success to the shell on a missing file;
        # a nonzero status correctly reports the failure.
        sys.exit(1)
    # os.path.basename instead of split('/') so Windows paths also work.
    filename = os.path.basename(img_path)
    img = cv2.imread(img_path)
    if h and w:
        # cv2.resize takes (width, height), hence the swapped order.
        img = cv2.resize(img, (int(w), int(h)))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Map uint8 [0, 255] to float [-1, 1] (Inception-style preprocessing).
    img = (img / 255.0 - 0.5) * 2.0
    print('input image shape: ', img.shape)
    return img, filename
def load_classification_pb(class_filename, mem_frac = 0.5, input_name = 'input',
                           output_name = 'InceptionV4/Logits/Predictions:0',  # 'MobilenetV1/Predictions/Reshape_1:0'
                           type = tf.float32):
    """Load a frozen classification graph (.pb) and open a session on it.

    Args:
        class_filename: path to the frozen GraphDef (.pb) file.
        mem_frac: fraction of GPU memory this session may use.
        input_name: name of the graph's input node to rebind to a placeholder.
        output_name: tensor name of the prediction output.
        type: dtype of the input placeholder.

    Returns:
        (class_image, predictions, graph, sess, width, height, labels);
        width/height/labels are None unless the graph embeds the optional
        'input_size:0' and 'label_names:0' tensors.
    """
    classification_graph = tf.Graph()
    with classification_graph.as_default():
        class_graph_def = tf.GraphDef()
        with tf.gfile.GFile(class_filename, 'rb') as fid:
            class_graph_def.ParseFromString(fid.read())
        # Rebind the graph's input to a fresh placeholder of variable size.
        class_image = tf.placeholder(type, shape=(None, None, None, 3))
        tf.import_graph_def(class_graph_def, {input_name: class_image}, name='')
        predictions = classification_graph.get_tensor_by_name(output_name)

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = mem_frac
    config.allow_soft_placement = True
    config.log_device_placement = False
    sess = tf.Session(graph=classification_graph, config=config)

    width = None
    height = None
    labels = None
    try:
        # Optional metadata tensors; most graphs do not embed them.
        shape_tensor = classification_graph.get_tensor_by_name('input_size:0')
        labels_tensor = classification_graph.get_tensor_by_name('label_names:0')
        shape, labels = sess.run([shape_tensor, labels_tensor])
        width, height, _ = shape
        print(shape, labels)
    except Exception:
        # Bug fix: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit. Missing metadata is expected;
        # fall back to the None defaults.
        pass
    return class_image, predictions, classification_graph, sess, width, height, labels
def main():
    """Run classification inference on a single image or a directory of images.

    With --measure-time only inference speed is reported. With --with_score
    the parent directory of each image is treated as its ground-truth class
    and an accuracy percentage is printed at the end.
    """
    args = get_arguments()
    # Bug fix: the old check (args.img_path[-4] != '.') misclassified
    # '.jpeg' files (5-char suffix) as directories; test the filesystem.
    if os.path.isdir(args.img_path):
        files = GetAllFilesListRecusive(args.img_path, ['.jpg', '.jpeg', '.png'])
    else:
        files = [args.img_path]
    image_tensor, predictions, graph, sess, width, height, labels = load_classification_pb(args.pb_file)
    if args.measure_time:
        calculate_perfomance(sess, image_tensor, predictions, (height, width), args.runs, args.batch_size)
        quit()
    total = 0
    correct = 0
    for path in files:
        img, filename = load_img(path, height, width)
        if args.with_score:
            # Ground-truth class is the name of the image's parent directory.
            parent = path[: path.rfind('/')]
            cl = parent[parent.rfind('/') + 1:]
        image_np_expanded = np.expand_dims(img, axis=0)
        t = time.time()
        preds = sess.run(
            [predictions], feed_dict={image_tensor: image_np_expanded})[0][0]
        print('time: ', (time.time() - t) * 1000.0)
        indx = np.argmax(preds)
        if args.with_score:
            print(labels[indx].decode("utf-8"), cl)
            if labels[indx].decode("utf-8") == cl:
                correct = correct + 1
            total = total + 1
        print('class: ', labels[indx])
    # Guard against ZeroDivisionError when no images were scored.
    if args.with_score and total > 0:
        print('Correct score: ', (correct / float(total)) * 100.0)


if __name__ == '__main__':
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement