import numpy as np
from numpy.random import randint
import tensorflow as tf
from pprint import pprint
NUM_DATAS = 100
NUM_TESTS = 10
INPUT_DIMENSION = 2
OUTPUT_DIMENSION = 2  # one logit per class ("equal" vs. "different"); sparse softmax cross entropy needs num_classes >= 2
OPTIMIZE_RATE = 0.5
TRAIN_TIMES = 1000
def createSuperviserData(nDatas):
    # Generate nDatas supervised pairs: label 0 when the two inputs are equal, 1 otherwise.
    inData = []
    outData = []
    for i in range(nDatas):
        tmpIn = [
            #float(randint(18, 100)),
            float(randint(1, 3)),  # 1 or 2
            #float(randint(18, 100)),
            float(randint(1, 3)),  # 1 or 2
        ]
        if tmpIn[0] == tmpIn[1]:
            tmpOut = 0
        else:
            tmpOut = 1
        inData.append(tmpIn)
        outData.append(tmpOut)
    return [inData, outData]
def inference(input_placeholder):
    # Single linear layer: logits = xW + b.
    W = tf.Variable(tf.zeros([INPUT_DIMENSION, OUTPUT_DIMENSION]), name='weight')
    b = tf.Variable(tf.zeros([OUTPUT_DIMENSION]), name='bias')
    y = tf.matmul(input_placeholder, W) + b
    return y
def loss(logits, labels):
    pprint({'logits': logits, 'labels': labels})
    labels = tf.to_int64(labels)
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    xentropy_mean = tf.reduce_mean(xentropy, name='loss')
    return xentropy_mean
def training(loss_op):
    # Parameter renamed from `loss` so it does not shadow the loss() function above.
    optimizer = tf.train.GradientDescentOptimizer(OPTIMIZE_RATE)
    train_step = optimizer.minimize(loss_op, name='training')
    return train_step
def accuracy(logits, labels):
    # labels already holds class indices, so compare them to argmax of the logits
    # directly; tf.argmax(labels, 1) would fail on a rank-1 label tensor.
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.to_int64(labels))
    accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
    return accuracy_op
if __name__ == '__main__':
    with tf.name_scope('superviser'):
        original_input, original_output = createSuperviserData(NUM_DATAS)
        test_input, test_output = createSuperviserData(NUM_TESTS)
        # Leave the batch dimension unspecified so the same placeholders accept
        # both the 100-sample training feeds and the 10-sample test feed.
        inputs = tf.placeholder(tf.float32, shape=(None, INPUT_DIMENSION), name='x')
        labels = tf.placeholder(tf.int32, shape=(None,), name='y')
        feed_dict = {inputs: original_input, labels: original_output}
        test_feed_dict = {inputs: test_input, labels: test_output}
        pprint({'original_input': original_input, 'original_output': original_output,
                'test_input': test_input, 'test_output': test_output})
    with tf.name_scope('main_op'):
        logits = inference(inputs)
        loss_op = loss(logits, labels)  # keep the tensor under a new name so loss() is not shadowed
        train_op = training(loss_op)
        accuracy_op = accuracy(logits, labels)
    pprint({'inputs': inputs, 'labels': labels, 'logits': logits, 'loss': loss_op,
            'train_op': train_op, 'accuracy': accuracy_op})
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    print()
    for i in range(TRAIN_TIMES):
        x, y = createSuperviserData(NUM_DATAS)
        sess.run(train_op, feed_dict={inputs: x, labels: y})
        if i % (TRAIN_TIMES // 10) == 0:
            print('loss: %.6f' % sess.run(loss_op, feed_dict=test_feed_dict))
    print()
    print('loss: %.6f' % sess.run(loss_op, feed_dict=test_feed_dict))
    print('accuracy: %.6f' % sess.run(accuracy_op, feed_dict=test_feed_dict))
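
# Note: the target above is XOR-like (label 1 exactly when the two inputs
# differ), which a single linear layer cannot separate; on uniformly drawn
# pairs its accuracy is capped at 0.75. A minimal sketch of one way to fit
# it, keeping the same TF 1.x graph style, is to swap inference() for a small
# two-layer network. HIDDEN_UNITS and inference_mlp are illustrative names,
# not part of the original script.
HIDDEN_UNITS = 8

def inference_mlp(input_placeholder):
    # Hidden layer with a ReLU nonlinearity so the XOR-like target becomes separable.
    W1 = tf.Variable(tf.truncated_normal([INPUT_DIMENSION, HIDDEN_UNITS], stddev=0.1), name='weight1')
    b1 = tf.Variable(tf.zeros([HIDDEN_UNITS]), name='bias1')
    hidden = tf.nn.relu(tf.matmul(input_placeholder, W1) + b1)
    # Linear output layer, one logit per class as before.
    W2 = tf.Variable(tf.truncated_normal([HIDDEN_UNITS, OUTPUT_DIMENSION], stddev=0.1), name='weight2')
    b2 = tf.Variable(tf.zeros([OUTPUT_DIMENSION]), name='bias2')
    return tf.matmul(hidden, W2) + b2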