import numpy as np
import tensorflow as tf
import pickle as pkl
from functools import reduce
from skimage.transform import resize

def to_one_hot(y):
    # Encode each integer label as a one-hot row vector over the 36 classes.
    shape_one_hot = (np.size(y, 0), 36)
    one_hot_vectors = np.zeros(shape_one_hot)
    for i, label in enumerate(y):
        one_hot_vectors[i, label] = 1
    return one_hot_vectors
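
# Example (assuming the labels are the integers 0..35, e.g. 10 digits plus
# 26 letters; the exact encoding is not stated in the original):
# to_one_hot(np.array([0, 10])) -> two rows with a single 1 at columns 0 and 10.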

def rescale_images(x, new_shape):
    # Downsample flattened 56x56 images to new_shape with area interpolation,
    # then flatten back to shape (n_images, height * width).
    with tf.Session() as sess:
        to_resize = tf.image.resize_area(x.reshape([len(x), 56, 56, 1]), new_shape)
        resized = sess.run(to_resize)
    resized = resized.reshape([len(resized), reduce(lambda x, y: x * y, new_shape)])
    return resized
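
# A session-free alternative sketch using the imported skimage resize (an
# assumption, not part of the original pipeline; note skimage rescales
# intensities to [0, 1] by default, so values may differ from resize_area):
def rescale_images_skimage(x, new_shape):
    resized = np.stack([resize(img.reshape(56, 56), tuple(new_shape)) for img in x])
    return resized.reshape(len(x), new_shape[0] * new_shape[1])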

def next_batch(x_train, y_train, batch_size):
    # Draw a random batch of batch_size examples from the training set.
    idx = np.arange(0, len(x_train))
    np.random.shuffle(idx)
    batch_idx = idx[:batch_size]
    batch_x = x_train[batch_idx]
    batch_y = y_train[batch_idx]
    return batch_x, batch_y
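
# Usage sketch: each call reshuffles the whole index array, so consecutive
# batches can overlap (sampling with replacement across an "epoch"):
# batch_x, batch_y = next_batch(x_train, y_train, 50)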

def get_accuracy(x_val, y_val, x, y, y_, sess, keep_prob):
    # Fraction of examples whose predicted class (argmax of the logits y)
    # matches the one-hot target y_; keep_prob = 1.0 disables dropout.
    # Note: this builds fresh ops on every call, which grows the graph.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return sess.run(accuracy, feed_dict={x: x_val, y_: y_val, keep_prob: 1.0})

def weight_variable(shape):
    # Truncated-normal initialization to break symmetry between units.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # Small positive bias keeps ReLU units active at the start of training.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # Stride-1 convolution with SAME padding (spatial size is preserved).
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling with stride 2 (halves each spatial dimension).
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
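
# Shape walkthrough for the network below (a worked check, not original code):
# 28x28x1 -> conv 5x5 SAME -> 28x28x32 -> pool -> 14x14x32
#         -> conv 5x5 SAME -> 14x14x64 -> pool -> 7x7x64 -> flatten.
assert 7 * 7 * 64 == 3136  # matches the first fully connected layer's input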

def train_model(x_train, y_train, x_val, y_val):
    # hyperparameters
    batch_size = 50
    epochs = 150
    # dimensions
    input_shape = np.size(x_train, 1)
    output_shape = 36
    x = tf.placeholder(tf.float32, [None, input_shape], name="network_input")
    y_ = tf.placeholder(tf.float32, [None, output_shape], name="network_output")
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    # first convolutional layer: 5x5 kernels, 1 -> 32 channels, then 2x2 pool
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # second convolutional layer: 5x5 kernels, 32 -> 64 channels, then 2x2 pool
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # fully connected layer on the flattened 7x7x64 feature maps
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # dropout on the fully connected layer (keep_prob is fed per run)
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # readout layer producing logits over the 36 classes
    W_fc2 = weight_variable([1024, 36])
    b_fc2 = bias_variable([36])
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    optimizer = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    for epoch_idx in range(epochs):
        # 400 random batches of 50 examples per epoch, dropout keep_prob 0.75
        for batch_idx in range(400):
            batch_x, batch_y = next_batch(x_train, y_train, batch_size)
            sess.run(optimizer, feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.75})
        val_accuracy = accuracy.eval(feed_dict={x: x_val, y_: y_val, keep_prob: 1.0})
        print("Epoch %d, validation accuracy %g" % (epoch_idx, val_accuracy))

def load_data(filename):
    with open(filename, 'rb') as file:
        x_train, y_train, x_val, y_val = pkl.load(file)
    return x_train, y_train, x_val, y_val

def save_data(filename, data):
    with open(filename, 'wb') as output:
        pkl.dump(data, output, pkl.HIGHEST_PROTOCOL)
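
# Usage sketch (hypothetical filename): the pickle is expected to hold the
# same 4-tuple that load_data unpacks above.
# save_data('dataset.pkl', (x_train, y_train, x_val, y_val))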

if __name__ == '__main__':
    # Load the pickled dataset, one-hot encode the labels, downsample the
    # 56x56 inputs to 28x28, and train the network.
    x_train, y_train, x_val, y_val = load_data('rotated-15_17.05.pkl')
    y_train = to_one_hot(y_train)
    y_val = to_one_hot(y_val)
    x_train = rescale_images(x_train, [28, 28])
    x_val = rescale_images(x_val, [28, 28])
    train_model(x_train, y_train, x_val, y_val)