Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- # -*- coding: utf-8 -*-
- """
- Created on Fri Oct 18 13:54:50 2019
- @author: Tuguldur
- """
- import numpy as np
- import matplotlib.pyplot as plt
- #import cifar_tools
- import tensorflow as tf
- tf.compat.v1.disable_eager_execution()
# The data-loading helper's import was commented out at the top of the file,
# which makes the call below fail with NameError; import it here so the
# script actually runs.
import cifar_tools

# Load CIFAR-10: class names, image matrix (one flattened image per row),
# and integer class labels.
# NOTE(review): the 24*24 placeholder below implies read_data returns
# 24x24 single-channel images — confirm against cifar_tools.
names, data, labels = \
    cifar_tools.read_data('E:/Machine learning/CNN/Cifar_data/cifar-10-python/cifar-10-batches-py')

# Graph inputs: a batch of flattened 24x24 images and one-hot labels.
x = tf.compat.v1.placeholder(tf.float32, [None, 24 * 24])
y = tf.compat.v1.placeholder(tf.float32, [None, len(names)])

# Trainable parameters, randomly initialized.
# Conv layer 1: 5x5 kernels, 1 input channel -> 64 feature maps.
W1 = tf.Variable(tf.compat.v1.random_normal([5, 5, 1, 64]))
b1 = tf.Variable(tf.compat.v1.random_normal([64]))
# Conv layer 2: 5x5 kernels, 64 -> 64 feature maps.
W2 = tf.Variable(tf.compat.v1.random_normal([5, 5, 64, 64]))
b2 = tf.Variable(tf.compat.v1.random_normal([64]))
# Fully connected layer: 6*6*64 flattened activations -> 1024 hidden units
# (24x24 input halved by two 2x2 max-pools gives 6x6 spatial maps).
W3 = tf.Variable(tf.compat.v1.random_normal([6 * 6 * 64, 1024]))
b3 = tf.Variable(tf.compat.v1.random_normal([1024]))
# Output layer: 1024 hidden units -> one logit per class.
W_out = tf.Variable(tf.compat.v1.random_normal([1024, len(names)]))
b_out = tf.Variable(tf.compat.v1.random_normal([len(names)]))
def conv_layer(x, W, b):
    """Same-padded, stride-1 2-D convolution with bias, followed by ReLU."""
    linear = tf.compat.v1.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    biased = tf.compat.v1.nn.bias_add(linear, b)
    return tf.compat.v1.nn.relu(biased)
def maxpool_layer(conv, k=2):
    """Downsample by max-pooling over non-overlapping k x k windows."""
    window = [1, k, k, 1]
    return tf.compat.v1.nn.max_pool(conv, ksize=window, strides=window, padding='SAME')
def model():
    """Build the CNN forward pass on the global placeholder ``x``.

    Architecture: conv -> max-pool -> LRN -> conv -> LRN -> max-pool
    -> fully connected + ReLU -> affine output.
    Returns the unscaled class logits tensor.
    """
    # Restore the flat input rows to a single-channel 24x24 image batch.
    images = tf.compat.v1.reshape(x, shape=[-1, 24, 24, 1])

    # First block: convolution, 2x2 max-pool, then local response norm.
    h1 = maxpool_layer(conv_layer(images, W1, b1))
    h1 = tf.compat.v1.nn.lrn(h1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Second block: convolution, normalization, then 2x2 max-pool.
    h2 = conv_layer(h1, W2, b2)
    h2 = tf.compat.v1.nn.lrn(h2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
    h2 = maxpool_layer(h2)

    # Flatten to match the fully connected weight matrix W3.
    flat = tf.compat.v1.reshape(h2, [-1, W3.get_shape().as_list()[0]])
    hidden = tf.compat.v1.nn.relu(tf.add(tf.compat.v1.matmul(flat, W3), b3))

    # Final affine layer producing one logit per class.
    return tf.add(tf.compat.v1.matmul(hidden, W_out), b_out)
# Logits for the current input batch.
model_op = model()

# Softmax cross-entropy against the one-hot labels, averaged over the batch.
cost = tf.compat.v1.reduce_mean(
    tf.compat.v1.nn.softmax_cross_entropy_with_logits(logits=model_op, labels=y)
)

# One Adam step on the mean cross-entropy.
train_op = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

# Batch accuracy: fraction of rows whose predicted class (argmax of the
# logits) matches the labeled class. Uses the tf.compat.v1 namespace
# consistently, matching the rest of the file (the original mixed
# tf.argmax / tf.reduce_mean with the compat.v1 aliases).
correct_pred = tf.equal(tf.compat.v1.argmax(model_op, 1), tf.compat.v1.argmax(y, 1))
accuracy = tf.compat.v1.reduce_mean(tf.compat.v1.cast(correct_pred, tf.float32))
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())

    # Materialize the one-hot label matrix once, outside the training loop.
    onehot_op = tf.compat.v1.one_hot(labels, len(names), on_value=1., off_value=0., axis=-1)
    onehot_vals = sess.run(onehot_op)

    # 200 mini-batches per epoch (integer division; any remainder becomes a
    # short final batch via slicing).
    batch_size = len(data) // 200
    print('batch size', batch_size)

    for j in range(0, 1000):
        print('EPOCH', j)
        for i in range(0, len(data), batch_size):
            batch_data = data[i:i + batch_size, :]
            batch_onehot_vals = onehot_vals[i:i + batch_size, :]
            _, accuracy_val = sess.run(
                [train_op, accuracy],
                feed_dict={x: batch_data, y: batch_onehot_vals},
            )
            # Periodic progress report (fires when the row offset is a
            # multiple of 1000).
            if i % 1000 == 0:
                print(i, accuracy_val)
        print('DONE WITH EPOCH')
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement