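# TensorFlow 1.x: a small CNN (one conv + max-pool layer, two fully connected
# layers) for two-class classification of 32x32 single-channel images, trained
# one sample at a time with plain SGD. Expects .npy files where each image
# flattens to W*W floats and each label is a length-2 one-hot vector.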
import tensorflow as tf
import numpy as np
from random import shuffle

W = 32
np.random.seed(0)
tf.set_random_seed(0)
tf_w1 = tf.truncated_normal([2, 2, 1, 5], stddev=0.1)
tf_w2 = tf.constant(0.1, shape=[5])
# Fixed: one 2x2 stride-2 max-pool turns the 32x32 map into 16x16, so the
# first FC layer takes 16*16*5 inputs (the original 4*4*5 broke the reshape).
tf_w5 = tf.truncated_normal([16*16*5, 2000], stddev=0.1)
tf_w6 = tf.constant(0.1, shape=[2000])
tf_w7 = tf.truncated_normal([2000, 2], stddev=0.1)
tf_w8 = tf.constant(0.1, shape=[2])
with tf.Session() as sess:
    w1, w2, w5, w6, w7, w8 = sess.run([tf_w1, tf_w2, tf_w5, tf_w6, tf_w7, tf_w8])
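# The random tensors above are evaluated exactly once; wrapping the resulting
# arrays in tf.Variable below means every run starts from the same weights.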
x = tf.placeholder(tf.float32, shape=[None, W*W])
y_true = tf.placeholder(tf.float32, shape=[None, 2])
input_image = tf.reshape(x, [-1, W, W, 1])

w_conv1 = tf.Variable(w1)
b_conv1 = tf.Variable(w2)
h_conv1 = tf.nn.relu(tf.nn.conv2d(input_image, w_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)
# Fixed: pool with stride 2 (the original stride-1 pool did not downsample),
# giving a 16x16x5 map that matches the FC weight shape above.
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
h_pool1_flat = tf.reshape(h_pool1, [-1, 16*16*5])
w_fc1 = tf.Variable(w5)
b_fc1 = tf.Variable(w6)
h_fc1 = tf.nn.sigmoid(tf.matmul(h_pool1_flat, w_fc1) + b_fc1)
w_fc2 = tf.Variable(w7)
b_fc2 = tf.Variable(w8)
y_conv = tf.matmul(h_fc1, w_fc2) + b_fc2  # softmax is applied inside the loss
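# Note: with batch size 1, the reduce_logsumexp below reduces a single
# per-example loss and so acts as the identity; for real batches,
# tf.reduce_mean would be the conventional aggregation.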
loss = tf.reduce_logsumexp(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_conv))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_true, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
train_x = np.load('train_x.npy')
train_y = np.load('train_y.npy')
test_x = np.load('test_x.npy')
test_y = np.load('test_y.npy')
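# Shuffle each split with one shared permutation so images stay paired
# with their labels.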
permut = list(range(len(train_y)))
shuffle(permut)
train_x = [train_x[i] for i in permut]
train_y = [train_y[i] for i in permut]

permut = list(range(len(test_y)))
shuffle(permut)
test_x = [test_x[i] for i in permut]
test_y = [test_y[i] for i in permut]
len_dataset = 1000  # window size for the running loss/accuracy report
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    accuracy_change = []
    loss_change = []
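    # Train one sample at a time: measure loss/accuracy on the sample,
    # then take a single SGD step on it.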
    for i in range(len(train_x)):
        train_accuracy, train_loss = sess.run([accuracy, loss], feed_dict={x: np.asarray(train_x[i]).reshape(1, W*W), y_true: np.asarray(train_y[i]).reshape(1, 2)})
        train_step.run(feed_dict={x: np.asarray(train_x[i]).reshape(1, W*W), y_true: np.asarray(train_y[i]).reshape(1, 2)})
        loss_change.append(train_loss)
        accuracy_change.append(train_accuracy)
        if len(loss_change) % len_dataset == 0:
            print('step:', len(loss_change), 'loss:', sum(loss_change[-len_dataset:])/len_dataset, 'accuracy:', sum(accuracy_change[-len_dataset:])/len_dataset)
    print('mnist test...')
    accuracy_change = []
    loss_change = []
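    # Evaluate on the test split: same forward pass, but no train_step,
    # so the weights stay fixed.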
    for i in range(len(test_x)):
        test_accuracy, test_loss = sess.run([accuracy, loss], feed_dict={x: np.asarray(test_x[i]).reshape(1, W*W), y_true: np.asarray(test_y[i]).reshape(1, 2)})
        loss_change.append(test_loss)
        accuracy_change.append(test_accuracy)
        if len(loss_change) % len_dataset == 0:
            print('step:', len(loss_change), 'loss:', sum(loss_change[-len_dataset:])/len_dataset, 'accuracy:', sum(accuracy_change[-len_dataset:])/len_dataset)
    print('test_loss:', sum(loss_change)/len(loss_change), 'test_accuracy:', sum(accuracy_change)/len(accuracy_change))