Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import os
import time

import numpy as np
import tensorflow as tf
# TF1-style command-line flags for the input-pipeline benchmark below.
FLAGS = tf.app.flags.FLAGS
# Number of timed sess.run(images) iterations executed by train().
tf.app.flags.DEFINE_integer('max_steps', 1000, "Number of rounds of testing to run.")
tf.app.flags.DEFINE_integer('batch_size', 256, "Number of images to process in a batch.")
# NOTE(review): num_gpus only scales the requested batch size in train();
# no per-GPU towers are actually built in this file.
tf.app.flags.DEFINE_integer('num_gpus', 4, """How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False, "Whether to log device placement.")
def train():
    """Benchmark the input pipeline: time `max_steps` fetches of image batches.

    Builds the input graph pinned to the CPU, starts the TF1 queue-runner
    threads, then prints the wall-clock duration of each `sess.run(images)`
    call. Despite the name, no model is trained — this only exercises input.
    """
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # One combined batch sized as if it were split across all GPUs.
        images, labels = inputs(mode='train',
                                batch_size=FLAGS.batch_size * FLAGS.num_gpus,
                                no_threads=4)
        # tf.initialize_all_variables() is deprecated since TF 0.12;
        # tf.global_variables_initializer() is the drop-in replacement.
        init = tf.global_variables_initializer()
        config = tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement)
        # Context manager ensures the session is closed even if a step raises.
        with tf.Session(config=config) as sess:
            sess.run(init)
            # Launch the threads that feed the filename/batch queues.
            tf.train.start_queue_runners(sess=sess)
            for step in range(FLAGS.max_steps):
                # Time a single batch fetch.
                start_time = time.time()
                _ = sess.run(images)
                duration = time.time() - start_time
                print(duration)
def _read_and_decode(filename_queue):
    """Read a single TFRecord from `filename_queue` and decode it.

    Each record is expected to hold a JPEG-encoded image under 'image_raw'
    and an int64 label under 'label'.

    Returns:
        A `(image, label)` pair: `image` is a float32 tensor randomly cropped
        to IMAGE_SIZE_DISTORTED x IMAGE_SIZE_DISTORTED x IMAGE_DEPTH, and
        `label` is a scalar int64 tensor.
    """
    record_reader = tf.TFRecordReader()
    _, serialized = record_reader.read(filename_queue)
    parsed = tf.parse_single_example(
        serialized,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string)
        })
    decoded = tf.image.decode_jpeg(parsed['image_raw'], channels=3)
    # Random crop is the only distortion applied in this pipeline.
    cropped = tf.random_crop(
        decoded, [IMAGE_SIZE_DISTORTED, IMAGE_SIZE_DISTORTED, IMAGE_DEPTH])
    distorted = tf.cast(cropped, tf.float32)
    return distorted, parsed['label']
def _input_pipeline(filenames, batch_size, num_epochs=None, shuffle=False,
                    no_threads=1):
    """Build a queued batch pipeline over TFRecord `filenames`.

    Args:
        filenames: list of TFRecord file paths to read.
        batch_size: number of examples per output batch.
        num_epochs: limit on passes over the data, or None for unlimited.
        shuffle: if True, shuffle both the filename order and the examples
            (via `tf.train.shuffle_batch`); if False, keep deterministic order.
        no_threads: number of enqueuing threads for the batch queue.

    Returns:
        An `(example_batch, label_batch)` pair of tensors.
    """
    # BUGFIX: the filename queue previously hard-coded shuffle=False, so the
    # caller's `shuffle` flag never affected file order; honor it here too.
    filename_queue = tf.train.string_input_producer(filenames,
                                                    num_epochs=num_epochs,
                                                    shuffle=shuffle)
    example, label = _read_and_decode(filename_queue=filename_queue)
    # Standard queue sizing: keep at least `min_after_dequeue` elements buffered
    # for good shuffling, plus headroom for the batching threads.
    min_after_dequeue = 1000
    capacity = min_after_dequeue + 4 * batch_size
    if shuffle:
        example_batch, label_batch = tf.train.shuffle_batch(
            [example, label],
            batch_size=batch_size,
            capacity=capacity,
            min_after_dequeue=min_after_dequeue,
            num_threads=no_threads)
    else:
        example_batch, label_batch = tf.train.batch(
            [example, label],
            batch_size=batch_size,
            capacity=capacity,
            num_threads=no_threads)
    return example_batch, label_batch
def inputs(mode="train", batch_size=64, no_threads=5):
    """Build the input pipeline for the requested dataset split.

    Args:
        mode: "train" (shuffled examples) or "eval" (deterministic order).
        batch_size: number of examples per fetched batch.
        no_threads: number of enqueuing threads for the batch queue.

    Returns:
        An `(image_batch, label_batch)` pair of tensors.

    Raises:
        NotImplementedError: if `mode` is neither "train" nor "eval".
    """
    if mode == "train":
        records_dir = TF_RECORDS_DIR_TRAIN
        shuffle = True
    elif mode == "eval":
        records_dir = TF_RECORDS_DIR_EVAL
        shuffle = False
    else:
        raise NotImplementedError(
            "Please specify supported mode. "
            "Supported mode parameter values are: train and eval")
    # Sort for reproducibility: os.listdir order is filesystem-dependent.
    # (Also avoids shadowing the `file` builtin, as the old loop variable did.)
    filenames = [os.path.join(records_dir, name)
                 for name in sorted(os.listdir(records_dir))]
    return _input_pipeline(filenames=filenames, batch_size=batch_size,
                           shuffle=shuffle, no_threads=no_threads)
def main(argv=None):
    """Entry point invoked by tf.app.run(); `argv` is unused."""
    train()
if __name__ == '__main__':
    # tf.app.run() parses the flags defined above, then calls main().
    tf.app.run()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement