Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
- import skimage.io # bug. need to import this before tensorflow
- import skimage.transform # bug. need to import this before tensorflow
- #from resnet_train import train
- import tensorflow as tf
- import time
- import os
- import sys
- import re
- import numpy as np
- from synset import *
- from image_processing import image_preprocessing
- import input_data
- from resnet34 import inference
- from resnet34 import inferencefinetune
- from resnet34 import fcf
- from resnet34 import *
- import tensorflow as tf
- import input_data
- from checkpoint import print_tensors_in_checkpoint_file
# Momentum coefficient for tf.train.MomentumOptimizer (used in main()).
MOMENTUM = 0.9
def checkpoint_fn(layers):
    """Return the on-disk path of the pretrained ResNet checkpoint.

    Args:
        layers: layer count of the ResNet variant (e.g. 50, 101).

    Returns:
        Path string of the form './pretrained/ResNet-L<layers>.ckpt'.
    """
    template = './pretrained/ResNet-L%d.ckpt'
    return template % layers
def top_k_error(predictions, labels, k):
    """Build a graph op for the top-k error rate over one batch.

    NOTE: deliberately uses the static FLAGS.batch_size (module global)
    rather than tf.shape(predictions)[0], per the original author's comment.

    Args:
        predictions: [batch, num_classes] score tensor.
        labels: [batch] int tensor of true class ids.
        k: number of top predictions that count as a hit.

    Returns:
        Scalar tensor: fraction of the batch NOT in the top-k.
    """
    denom = float(FLAGS.batch_size)  # tf.shape(predictions)[0]
    hits = tf.reduce_sum(tf.to_float(tf.nn.in_top_k(predictions, labels, k)))
    return (denom - hits) / denom
# Pin GPU enumeration order and restrict to a single device; must happen
# before any CUDA context is created by TensorFlow.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="5"

# Command-line flags (TF 1.x tf.app.flags). FLAGS is read by top_k_error()
# and main() below.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', './tmp/resnet_train_0',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_float('learning_rate', 0.01, "learning rate.")
tf.app.flags.DEFINE_integer('batch_size', 16, "batch size")
tf.app.flags.DEFINE_integer('max_steps', 500000, "max steps")
tf.app.flags.DEFINE_boolean('resume', False,
                            'resume from latest saved state')
tf.app.flags.DEFINE_boolean('minimal_summaries', True,
                            'produce fewer summaries to save HD space')

# Path for tf.summary.FileWriter and to store model checkpoints
filewriter_path = "./tmp/finetune_resnet0/log"
checkpoint_path = "./tmp/finetune_resnet0/"

# Create the checkpoint directory if it doesn't exist.
# BUG FIX: os.mkdir raises OSError when the parent ('./tmp') is missing;
# os.makedirs creates all intermediate directories as well.
if not os.path.isdir(checkpoint_path):
    os.makedirs(checkpoint_path)
def main(_):
    """Build the ResNet fine-tuning graph, restore pretrained weights, and
    run the training loop with periodic checkpointing and validation.

    NOTE(review): indentation reconstructed from a whitespace-mangled paste;
    tokens are unchanged from the original.
    """
    # Mode placeholder fed at every sess.run; note the graph itself is built
    # with is_training=True below, so this flag presumably only matters if
    # inference() wires it into batch-norm — TODO confirm against resnet34.
    is_training = tf.placeholder('bool', [], name='is_training')
    # logits, pools = inferencefinetune(images,
    #                                   num_classes=1000,
    #                                   is_training=True,
    #                                   bottleneck=False,
    #                                   num_blocks=[3, 4, 6, 3])
    # Input batch: hard-coded to 16x224x224x3 — must agree with
    # FLAGS.batch_size (default 16); changing the flag alone would break this.
    xx=tf.placeholder(tf.float32,[16,224,224,3])
    #images=placeholder
    #labels=placeholder
    # ResNet-34 topology (bottleneck=False, blocks [3,4,6,3]) with a 41-class
    # head for the fine-tuning dataset.
    logit = inference(xx,
                      num_classes=41,
                      is_training=True,
                      bottleneck=False,
                      num_blocks=[3, 4, 6, 3]) # num_blocks = [2,2,2,2] would give ResNet-18
    # Non-trainable counters: global_step advances with apply_gradients,
    # val_step with each validation EMA update.
    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    val_step = tf.get_variable('val_step', [],
                               initializer=tf.constant_initializer(0),
                               trainable=False)
    # Integer labels, one per batch element (batch hard-coded to 16).
    yy=tf.placeholder(tf.int64, 16)
    # loss() comes from `from resnet34 import *` — presumably softmax
    # cross-entropy plus regularization; verify in resnet34.
    loss1 = loss(logit, yy )
    predictions = tf.nn.softmax(logit)
    top1_error = top_k_error(predictions, yy, 1)
    # Evaluation op: Accuracy of the model
    #with tf.name_scope("accuracy"):
    # correct_pred = tf.equal(tf.argmax(predictions, 1), labels)
    # accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # Add the accuracy to the summary
    #tf.summary.scalar('accuracy', accuracy)
    # Training-loss EMA; MOVING_AVERAGE_DECAY and UPDATE_OPS_COLLECTION come
    # from the resnet34 star-import. The ema.apply op is queued into the
    # batch-norm update collection so it runs as part of train_op.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, ema.apply([loss1]))
    tf.summary.scalar('loss_avg', ema.average(loss1))
    # Validation-error EMA: `ema` is rebound here, so the loss EMA above is
    # no longer reachable through this name (its op is already registered).
    ema = tf.train.ExponentialMovingAverage(0.9, val_step)
    val_op_ = tf.group(val_step.assign_add(1), ema.apply([top1_error]))
    top1_error_avg = ema.average(top1_error)
    # tf.summary.scalar('val_top1_error_avg', top1_error_avg)
    # tf.summary.scalar('learning_rate', FLAGS.learning_rate)
    # SGD with momentum; gradients optionally histogrammed into summaries.
    opt = tf.train.MomentumOptimizer(FLAGS.learning_rate, MOMENTUM)
    grads = opt.compute_gradients(loss1)
    for grad, var in grads:
        if grad is not None and not FLAGS.minimal_summaries:
            tf.summary.histogram(var.op.name + '/gradients', grad)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # if not FLAGS.minimal_summaries:
    # # Display the training images in the visualizer.
    # tf.summary.image('images', images)
    # for var in tf.trainable_variables():
    # tf.summary.histogram(var.op.name, var)
    # One training op = gradient step + batch-norm/EMA collection updates.
    batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    train_op = tf.group(apply_gradient_op, batchnorm_updates_op)
    summary_op = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    # Full saver for periodic checkpoints of every variable.
    saver = tf.train.Saver(tf.global_variables())
    # Restore only the backbone scopes (scale1..scale5) from the pretrained
    # ImageNet checkpoint; the new fc head stays randomly initialized.
    k1=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='scale5')
    k2=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='scale4')
    k3=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='scale3')
    k4=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='scale2')
    k5=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='scale1')
    k6=k1+k2+k3+k4+k5
    #print(k6)
    saver1 = tf.train.Saver(k6)
    saver1.restore(sess, './tmp/res50imagenet/model.ckpt-3M')
    #saver.restore(sess, './tmp/resnet_train_0/model(test).ckpt-240301')
    print('model loaded')
    # Read the train/test file lists fully into memory. fcvid_path is assigned
    # twice and never used afterwards — presumably consumed inside
    # input_data.read_frame_and_label via the list contents; TODO confirm.
    with open('list/train.list','r') as lines:
        fcvid_path = '/mnt/hdd/ockwon/tensorflow1/1FPS/fcvid41yt8m3/'
        lines1 = list(lines)
    with open('list/test.list','r') as linesk:
        fcvid_path = '/mnt/hdd/ockwon/tensorflow1/1FPS/fcvid41yt8m3/'
        lines2 = list(linesk)
    start_time = time.time()
    # NOTE(review): xrange is Python 2-only; this script will not run under
    # Python 3 as written.
    for x in xrange(FLAGS.max_steps+1):
        # Fetch one batch of single frames (clip length 1) per step.
        image1, label1, _, _, _ = input_data.read_frame_and_label(
            filename=lines1,
            batch_size=FLAGS.batch_size,
            num_frames_per_clip=1,
            crop_size=224,
            shuffle=False)
        # NOTE(review): starting queue runners INSIDE the loop spawns new
        # threads every iteration — looks like a thread leak; normally this
        # is called once before the loop. Confirm before changing.
        tf.train.start_queue_runners(sess=sess)
        # summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
        step = sess.run(global_step)
        i = [train_op, loss1]
        # NOTE(review): unused (summary writing is commented out), and the
        # condition is truthy for every step NOT divisible by 100 — it was
        # presumably meant to be `step % 100 == 0 and step > 1`.
        write_summary = step % 100 and step > 1
        # if write_summary:
        # i.append(summary_op)
        # First run: evaluates the loss and bumps the validation EMA with the
        # TRAINING batch — an extra forward pass before the gradient step.
        loss_, val_op = sess.run( [loss1, val_op_], feed_dict={
            xx : image1,
            yy : label1,
            is_training : True
            })
        # Second run: the actual training step on the same batch.
        o = sess.run(i, feed_dict={ is_training: True,
                                    xx : image1,
                                    yy : label1 })
        loss_value = o[1]
        #duration = time.time() - start_time
        assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
        if step % 10 == 0:
            #examples_per_sec = FLAGS.batch_size / float(duration)
            # format_str is built but unused; the plain print below is the
            # actual progress output.
            format_str = ('step %d, loss = %.2f (%.1f examples/sec; %.3f '
                          'sec/batch)')
            print('step', step, 'loss', loss_value)
        # if write_summary:
        # summary_str = o[2]
        # summary_writer.add_summary(summary_str, step)
        # Save the model checkpoint periodically.
        # NOTE(review): this local assignment shadows the module-level
        # checkpoint_path defined near the top of the file.
        if step > 1 and step % 100 == 0:
            checkpoint_path = os.path.join(FLAGS.train_dir, 'model(new).ckpt')
            saver.save(sess, checkpoint_path, global_step=global_step)
        # Run validation periodically (one test batch every 100 steps).
        if step > 1 and step % 100 == 0:
            image2, label2, _, _, _ = input_data.read_frame_and_label(
                filename=lines2,
                batch_size=FLAGS.batch_size,
                num_frames_per_clip=1,
                crop_size=224,
                shuffle=False)
            #acc = sess.run([logits, accuracy], {is_training: False}, )
            #_, acc = sess.run([val_op, accuracy], {is_training: False})
            #print('Validation top1 accuracy %.2f' % acc)
            _, top1_error_value = sess.run([val_op_, top1_error], feed_dict={ is_training: False,
                                                                              xx : image2,
                                                                              yy : label2 })
            print('Validation top1 error %.2f' % top1_error_value) #cf = {}mag
# Script entry point: tf.app.run() parses FLAGS and dispatches to main().
if __name__ == '__main__':
    tf.app.run()
Advertisement
Add Comment
Please sign in to add a comment.
Advertisement