# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 13:54:50 2019

@author: Tuguldur
"""

import numpy as np
import matplotlib.pyplot as plt
import cifar_tools  # helper module that loads and preprocesses the CIFAR-10 batches
import tensorflow as tf
tf.compat.v1.disable_eager_execution()

# Load class names, flattened 24x24 grayscale images, and integer labels.
names, data, labels = \
    cifar_tools.read_data('E:/Machine learning/CNN/Cifar_data/cifar-10-python/cifar-10-batches-py')

# Placeholders for flattened 24x24 grayscale images and their one-hot labels.
x = tf.compat.v1.placeholder(tf.float32, [None, 24 * 24])
y = tf.compat.v1.placeholder(tf.float32, [None, len(names)])

# Two 5x5 convolutional layers with 64 filters each, a fully connected
# layer of 1024 units, and the output layer over the classes.
W1 = tf.Variable(tf.compat.v1.random_normal([5, 5, 1, 64]))
b1 = tf.Variable(tf.compat.v1.random_normal([64]))
W2 = tf.Variable(tf.compat.v1.random_normal([5, 5, 64, 64]))
b2 = tf.Variable(tf.compat.v1.random_normal([64]))
W3 = tf.Variable(tf.compat.v1.random_normal([6 * 6 * 64, 1024]))
b3 = tf.Variable(tf.compat.v1.random_normal([1024]))
W_out = tf.Variable(tf.compat.v1.random_normal([1024, len(names)]))
b_out = tf.Variable(tf.compat.v1.random_normal([len(names)]))

def conv_layer(x, W, b):
    # Convolution + bias + ReLU.
    conv = tf.compat.v1.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    conv_with_b = tf.compat.v1.nn.bias_add(conv, b)
    conv_out = tf.compat.v1.nn.relu(conv_with_b)
    return conv_out

def maxpool_layer(conv, k=2):
    # k x k max pooling with stride k halves the spatial dimensions.
    return tf.compat.v1.nn.max_pool(conv, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')

def model():
    # 24x24x1 input -> conv/pool/LRN -> conv/LRN/pool -> 6x6x64 -> FC 1024 -> logits.
    x_reshaped = tf.compat.v1.reshape(x, shape=[-1, 24, 24, 1])
    conv_out1 = conv_layer(x_reshaped, W1, b1)
    maxpool_out1 = maxpool_layer(conv_out1)
    norm1 = tf.compat.v1.nn.lrn(maxpool_out1, 4, bias=1.0, alpha=0.001 / 9.0,
                                beta=0.75)
    conv_out2 = conv_layer(norm1, W2, b2)
    norm2 = tf.compat.v1.nn.lrn(conv_out2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
    maxpool_out2 = maxpool_layer(norm2)
    maxpool_reshaped = tf.compat.v1.reshape(maxpool_out2,
                                            [-1, W3.get_shape().as_list()[0]])
    local = tf.add(tf.compat.v1.matmul(maxpool_reshaped, W3), b3)
    local_out = tf.compat.v1.nn.relu(local)
    out = tf.add(tf.compat.v1.matmul(local_out, W_out), b_out)
    return out

model_op = model()
# Softmax cross-entropy loss averaged over the batch, minimized with Adam.
cost = tf.compat.v1.reduce_mean(
    tf.compat.v1.nn.softmax_cross_entropy_with_logits(logits=model_op, labels=y)
)
train_op = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
correct_pred = tf.equal(tf.compat.v1.argmax(model_op, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.compat.v1.cast(correct_pred, tf.float32))

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # Convert the integer labels to one-hot vectors once, up front.
    onehot_labels = tf.compat.v1.one_hot(labels, len(names), on_value=1., off_value=0., axis=-1)
    onehot_vals = sess.run(onehot_labels)
    batch_size = len(data) // 200
    print('batch size', batch_size)
    for j in range(0, 1000):
        print('EPOCH', j)
        for i in range(0, len(data), batch_size):
            # Feed one mini-batch of images and matching one-hot labels per step.
            batch_data = data[i:i+batch_size, :]
            batch_onehot_vals = onehot_vals[i:i+batch_size, :]
            _, accuracy_val = sess.run([train_op, accuracy],
                                       feed_dict={x: batch_data, y: batch_onehot_vals})
            if i % 1000 == 0:
                print(i, accuracy_val)
        print('DONE WITH EPOCH')
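
# The script depends on a local cifar_tools module that is not included in the
# paste. Below is a minimal, hypothetical sketch of what its read_data() helper
# could look like, inferred from how the result is used above: it unpickles the
# CIFAR-10 Python batches, converts images to grayscale, center-crops them to
# 24x24, and returns (class names, N x 576 image rows, N integer labels). The
# grayscale conversion, crop offsets, and normalization are assumptions, not
# the author's actual implementation, which may differ.

import os
import pickle
import numpy as np

def _clean(raw_images):
    # raw_images: (N, 3072) uint8 rows laid out as R|G|B planes of 32x32 pixels.
    imgs = raw_images.reshape(-1, 3, 32, 32).astype(np.float32)
    grayscale = imgs.mean(axis=1)                    # average the RGB channels
    cropped = grayscale[:, 4:28, 4:28]               # center-crop 32x32 -> 24x24
    flat = cropped.reshape(-1, 24 * 24)
    # Normalize each image to zero mean and unit variance.
    mean = flat.mean(axis=1, keepdims=True)
    std = flat.std(axis=1, keepdims=True) + 1e-8
    return (flat - mean) / std

def read_data(directory):
    # Class names live in batches.meta; training images in data_batch_1..5.
    with open(os.path.join(directory, 'batches.meta'), 'rb') as f:
        names = pickle.load(f, encoding='latin1')['label_names']
    data, labels = [], []
    for i in range(1, 6):
        with open(os.path.join(directory, 'data_batch_%d' % i), 'rb') as f:
            batch = pickle.load(f, encoding='latin1')
        data.append(batch['data'])
        labels.append(batch['labels'])
    return names, _clean(np.vstack(data)), np.hstack(labels)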