Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import numpy as np
- import tensorflow as tf
def file_len(fname):
    """Return the number of lines in the file *fname*.

    Bug fix: the original left ``i`` unbound when the file was empty and
    raised ``UnboundLocalError``; this version returns 0 for an empty file.

    :param fname: path to a text file
    :return: line count as an int (0 if the file has no lines)
    """
    count = 0
    with open(fname) as f:
        # enumerate(..., start=1) makes the last value the total line count.
        for count, _ in enumerate(f, start=1):
            pass
    return count
def convertData(digit):
    """Encode *digit* (0-9, anything int() accepts) as a one-hot vector.

    :param digit: class label; coerced with ``int()``
    :return: numpy array of length 10, all zeros except a 1 at ``int(digit)``
    """
    index = int(digit)
    encoded = np.zeros(10)
    encoded[index] = 1
    return encoded
def getData(filename):
    """Read a 401-column CSV into (features, one-hot labels) numpy arrays.

    Uses the TF1 queue-based input pipeline: columns 0-399 are features,
    column 400 is the class label. Rows whose label is -1 are skipped
    (presumably a sentinel for "unlabeled" — TODO confirm against the data).

    Perf fix: the original called ``np.vstack`` once per row, which copies
    the whole accumulated array each iteration (O(n^2) total). Rows are now
    collected in Python lists and converted to arrays once at the end; the
    returned arrays are the same.

    :param filename: path to the CSV file
    :return: tuple ``(x, y)`` — x stacks the 400 feature columns per kept
             row, y stacks the corresponding one-hot labels
    """
    file_name_queue = tf.train.string_input_producer([filename])
    reader = tf.TextLineReader()
    key, value = reader.read(file_name_queue)
    # 401 per-column defaults of 0.0 -> decode_csv yields 401 float tensors.
    record_defaults = np.zeros([401, 1]).tolist()
    data = tf.decode_csv(value, record_defaults=record_defaults)
    features = tf.stack(data[:400])
    labels = tf.stack(data[400])
    xs = []
    ys = []
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        # One sess.run per line in the file; the queue feeds rows in order.
        for _ in range(file_len(filename)):
            tx, ty = sess.run([features, labels])
            if ty == -1:
                continue
            xs.append(tx)
            ys.append(convertData(ty))
        coord.request_stop()
        coord.join(threads)
    # np.array([]) when no rows were kept, matching the original's empty case.
    x = np.array(xs) if xs else np.array([])
    y = np.array(ys) if ys else np.array([])
    return (x, y)
def sigmoid(z):
    """Elementwise logistic sigmoid, 1 / (1 + exp(-z)), as a TF op.

    Bug fix: the original computed ``1 / (1 + exp(z))``, which is
    ``sigmoid(-z)`` — the sign of the exponent was inverted, flipping the
    decision boundary. (``tf.sigmoid(z)`` is the numerically stabler
    built-in alternative.)

    :param z: tensor of logits
    :return: tensor of the same shape with values in (0, 1)
    """
    exp_node = tf.exp(-z)
    return 1.0 / (1.0 + exp_node)
def logistic():
    """Train a 1-D logistic regression on a tiny hard-coded dataset with GD.

    Bug fixes vs. the original:
    - ``W`` was [40, 40] and ``x`` a [None, 40] placeholder, but ``x_train``
      is a flat list of 15 scalars — feeding it would fail the placeholder
      shape check. The model is scalar: W and b have shape [1] and x is a
      1-D [None] placeholder (W * x broadcasts elementwise).
    - A throwaway ``tf.InteractiveSession()`` was created only to obtain a
      graph for the summary writer and never closed; the training session
      was never closed either. Both replaced by one ``with tf.Session()``.

    Prints W/b/loss every 1000 steps, then the fitted probabilities on a
    small probe grid. Side effect: writes the graph to ./logisticGraph.
    """
    # Scalar model parameters, initialized at zero.
    W = tf.Variable(tf.zeros([1]), tf.float32)
    b = tf.Variable(tf.zeros([1]), tf.float32)
    x = tf.placeholder(tf.float32, [None], name="x")
    linear_model = W * x + b
    hypothesis = sigmoid(linear_model)
    y = tf.placeholder(tf.float32)
    # Binary cross-entropy, averaged over the batch.
    cost = -tf.reduce_mean(y * tf.log(hypothesis) +
                           (1 - y) * tf.log(1 - hypothesis))
    optimizer = tf.train.GradientDescentOptimizer(0.1)
    train = optimizer.minimize(cost)
    x_train = [3, 3.5, 4.1, 4.3, 5.0, 5.2, 5.6, 6.0, 6.1, 6.5, 7.6, 7.9, 9.2, 9.9, 10.2]
    y_train = [0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1]
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        # Dump the graph for TensorBoard using the training session's graph.
        tf.summary.FileWriter("logisticGraph", sess.graph)
        sess.run(init)
        for i in range(20000):
            sess.run(train, {x: x_train, y: y_train})
            # evaluate training accuracy
            if i % 1000 == 0:
                curr_W, curr_b, curr_loss = sess.run([W, b, cost], {x: x_train, y: y_train})
                print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))
        check = sess.run(hypothesis, {x: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.5, 7.0, 8.0, 9.0, 10.0]})
        print(check)
# Script entry point: load the dataset and report the array shapes.
if __name__ == "__main__":
    features, labels = getData("data.csv")
    print(features.shape)
    print(labels.shape)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement