Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# NOTE(review): Pastebin preview fragment — these lines duplicate an excerpt of
# the full script further below (logits-variable selection, gradient/saver
# setup, and the periodic checkpoint save). Not executable on its own.
for var in tf.trainable_variables():
    # keep only the final logits layer's weights/biases for fine-tuning
    if( 'resnet_v2_50/logits/weights' in var.name or 'resnet_v2_50/logits/biases' in var.name ):
        variables.append(var)
grads = tf.gradients(loss, variables)
saver = tf.train.Saver(var_list=variables)
...
if (global_steps_np + 1) % 1000 == 0:
    saver.save(sess, save_path='model/dogClassification.ckpt', global_step=global_steps_np)
import os
import numpy as np
import cv2
import pandas as pd
import tensorflow as tf
import tensorflow.contrib as tc
from tensorflow.contrib.slim.nets import resnet_v2

# Module-level configuration for fine-tuning ResNet-v2-50 on a 120-class
# dog-breed dataset (TF 1.x / tf.contrib.slim style).
is_training = True          # True: train + checkpoint; False: run the eval branch below
slim = tc.slim
batch_size = 16
class_types = 120           # number of breed classes (output logits)
img_w = resnet_v2.resnet_v2.default_image_size   # network's default input size — presumably 224; confirm
img_h = resnet_v2.resnet_v2.default_image_size
img_c = 3                   # RGB channels
global_steps = tf.train.get_or_create_global_step()
decay_steps = 20000         # steps between learning-rate decays
decay_rate = 0.1            # multiplicative decay factor
max_epoch = 100
def dog_generator(data_dir, data_labels_file, img_width, img_height, batch_size, max_epoch):
    """Yield batches of (images, labels) for training/evaluation.

    Reads a CSV mapping file stems to breed strings, maps each breed string
    to a dense integer id, then streams shuffled batches of resized,
    normalized images for ``max_epoch`` passes over the data.

    Args:
        data_dir: directory containing ``<stem>.jpg`` image files.
        data_labels_file: CSV with file stems in column 0, breed strings in column 1.
        img_width, img_height: target size passed to ``cv2.resize``.
        batch_size: images per yielded batch.
        max_epoch: number of full passes before the generator stops.

    Yields:
        (x_batch, y_batch): x_batch is float array of shape
        (batch_size, img_height, img_width, 3) scaled to [-1, 1);
        y_batch is an int array of class ids.

    Fixes vs. the original:
      * On wrap-around the old code pinned ``index`` to 0 for every remaining
        slot of the batch, duplicating image 0 and bumping ``epoch`` once per
        slot instead of once per pass.
      * ``random_order`` was re-permuted every batch while ``start`` persisted,
        mixing permutations so samples were repeated/skipped within an epoch.
        Now we reshuffle exactly once per completed pass.
    """
    print("training image folder: ", data_dir)
    print("training label file: ", data_labels_file)
    # NOTE(review): header=None assumes the CSV has no header row — Kaggle's
    # labels.csv starts with "id,breed"; verify, else the header becomes a sample.
    labels = pd.read_csv(data_labels_file, header=None)
    file_list = labels[0]
    data_labels = labels[1]

    # Dense integer id for every distinct breed string (iteration order of the
    # set is arbitrary, as in the original).
    label_from_str_to_id = {}
    label_from_id_to_str = {}
    for class_id, breed in enumerate(set(data_labels)):
        label_from_str_to_id[breed] = class_id
        label_from_id_to_str[class_id] = breed

    n = len(file_list)
    order = np.random.permutation(n)
    pos = 0
    epoch = 0
    stop = False
    while not stop:
        x_batch = []
        y_batch = []
        for _ in range(batch_size):
            if pos >= n:
                # Completed one full pass: reshuffle once, count the epoch.
                order = np.random.permutation(n)
                pos = 0
                epoch += 1
                if epoch >= max_epoch:
                    # Finish (and yield) the batch in progress, then stop —
                    # matches the original's reading = False behavior.
                    stop = True
            idx = order[pos]
            pos += 1
            img = cv2.imread(os.path.join(data_dir, file_list[idx] + '.jpg'))
            img = cv2.resize(img, (img_width, img_height))
            x_batch.append(img)
            y_batch.append(label_from_str_to_id[data_labels[idx]])
        x_batch = np.asarray(x_batch)
        y_batch = np.asarray(y_batch)
        # Normalize uint8 pixels to roughly [-1, 1), as the original did.
        x_batch = x_batch / 128.0 - 1.0
        yield x_batch, y_batch
# Graph inputs: image batch and integer class labels.
data = tf.placeholder(tf.float32, shape=[None, img_w, img_h, img_c])
label = tf.placeholder(tf.int32, shape=[None])

# One generator for both modes; only the epoch budget differs.
# NOTE(review): evaluation also reads data/train with the training labels —
# confirm a held-out split is intended rather than data/test.
g = dog_generator(os.path.join('data', 'train'),
                  os.path.join('data', 'labels.csv'), img_w, img_h,
                  batch_size, max_epoch if is_training else 100)
def peek(iterable):
    """Pull the next (x, y) pair off *iterable*.

    Returns (x, y, iterable) so the caller keeps the live iterator, or
    None once the iterator is exhausted.
    """
    exhausted = object()
    pair = next(iterable, exhausted)
    if pair is exhausted:
        return None
    x, y = pair
    return x, y, iterable
# Variable partition for fine-tuning:
#   variables     — global step + the final logits layer (trained & checkpointed)
#   variables1001 — every other ResNet weight (restored from the ImageNet
#                   checkpoint; excluded from tf.gradients below, so the
#                   backbone is effectively frozen)
variables = [global_steps]
variables1001 = []
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
    nets, scope = resnet_v2.resnet_v2_50(data, num_classes=class_types, is_training=is_training)
# Collapse the 1x1 spatial logits to shape (batch, class_types).
nets = tf.reshape(nets, [-1, class_types])
for var in tf.trainable_variables():
    if( 'resnet_v2_50/logits/weights' in var.name or 'resnet_v2_50/logits/biases' in var.name ):
        variables.append(var)
    else:
        variables1001.append(var)
# Run batch-norm moving-average updates (UPDATE_OPS) before the loss.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
    loss = slim.losses.sparse_softmax_cross_entropy(nets, label)
tf.summary.scalar("loss", loss)
merged_summary = tf.summary.merge_all()
pred_classes = tf.argmax(nets, axis=1)
# NOTE(review): variables[0] is global_steps, whose gradient is None —
# apply_gradients is expected to skip None grads, but confirm this does not
# raise; global_steps is kept in the list so the Saver checkpoints it.
grads = tf.gradients(loss, variables)
lr = tf.train.exponential_decay(learning_rate=0.001, global_step=global_steps, decay_steps=decay_steps,
                                decay_rate=decay_rate)
opt = tf.train.AdamOptimizer(lr)
train_op = opt.apply_gradients(zip(grads, variables), global_step=global_steps)
# NOTE(review): this writer appears unused — a second FileWriter is created
# inside the session below; presumably one of the two is redundant.
tf_writer = tf.summary.FileWriter(logdir='./')
saver = tf.train.Saver(var_list=variables)            # fine-tuned head + step
saverImageNet = tf.train.Saver(var_list=variables1001)  # pretrained backbone
# Training / evaluation driver.
#
# Fixes vs. the original:
#   * tf.train.latest_checkpoint returns None when ./model/ has no checkpoint
#     (the very first run) — the unconditional saver.restore crashed; now guarded.
#   * `res == None` -> `res is None`; `is_training == True` -> `is_training`.
#   * eval counter no longer hard-codes 16 (duplicating batch_size); it uses
#     the actual number of predictions, and the match count is vectorized.
with tf.Session() as sess:
    writer = tf.summary.FileWriter('./', sess.graph)
    sess.run(tf.global_variables_initializer())
    # Restore the ImageNet backbone, then resume the fine-tuned head if any.
    saverImageNet.restore(sess, './resnet_v2_50_2017_04_14/resnet_v2_50.ckpt')
    model_file = tf.train.latest_checkpoint('./model/')
    print(model_file)
    if model_file is not None:
        saver.restore(sess, model_file)
    times = 0      # total evaluated samples
    accuracy = 0   # running count of correct predictions
    while True:
        res = peek(g)
        if res is None:
            print("End.")
            break
        input_imgs, input_labels, g = res
        input_labels = input_labels.astype(np.int32)
        if is_training:
            _, loss_np, summary, global_steps_np = sess.run(
                [train_op, loss, merged_summary, global_steps],
                feed_dict={data: input_imgs, label: input_labels})
            writer.add_summary(summary, global_steps_np)
            if global_steps_np % 100 == 0:
                print("times: ", global_steps_np)
                print("loss: ", loss_np)
            if (global_steps_np + 1) % 1000 == 0:
                saver.save(sess, save_path='model/dogClassification.ckpt', global_step=global_steps_np)
        else:
            test, pred_classes_np = sess.run([nets, pred_classes], feed_dict={data: input_imgs})
            print(pred_classes_np)
            print(input_labels)
            times += len(pred_classes_np)
            accuracy += int(np.sum(pred_classes_np == input_labels))
            print(accuracy / times)  # running top-1 accuracy
Add Comment
Please, Sign In to add comment