import numpy as np
import tensorflow as tf  # written for the TensorFlow 1.x graph API; under TF 2.x,
                         # use `import tensorflow.compat.v1 as tf` and call
                         # `tf.disable_v2_behavior()`
import matplotlib.pyplot as plt

num_features = 2
num_iter = 10000
display_step = int(num_iter / 10)
learning_rate = 0.1  # the pasted value, 0.0000001, is far too small for the
                     # loss to move at all within 10000 iterations

num_input = 2   # units in the input layer (the two XOR inputs)
num_hidden1 = 2 # units in the first hidden layer
num_output = 1  # units in the output layer, a single 0-or-1 output

#%% mlp function

def multi_layer_perceptron_xor(x, weights, biases):

    # hidden layer: affine map followed by a sigmoid nonlinearity
    hidden_layer1 = tf.add(tf.matmul(x, weights['w_h1']), biases['b_h1'])
    hidden_layer1 = tf.nn.sigmoid(hidden_layer1)

    # output layer: raw logits; the loss below applies the sigmoid itself
    out_layer = tf.add(tf.matmul(hidden_layer1, weights['w_out']), biases['b_out'])

    return out_layer

#%%
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], np.float32) # 4x2, input
y = np.array([0, 1, 1, 0], np.float32) # 4, correct output, XOR operation
y = np.reshape(y, [4, 1]) # convert to 4x1

# training data and labels
X = tf.placeholder('float', [None, num_input]) # training data
Y = tf.placeholder('float', [None, num_output]) # labels

# weights and biases
weights = {
    'w_h1' : tf.Variable(tf.random_normal([num_input, num_hidden1])), # w1, from input layer to hidden layer 1
    'w_out': tf.Variable(tf.random_normal([num_hidden1, num_output])) # w2, from hidden layer 1 to output layer
}
biases = {
    'b_h1' : tf.Variable(tf.zeros([num_hidden1])),
    'b_out': tf.Variable(tf.zeros([num_output]))
}

model = multi_layer_perceptron_xor(X, weights, biases)

'''
- cost function and optimization
- sigmoid cross entropy -- single binary output
- softmax cross entropy -- multiple mutually exclusive outputs, normalized
'''
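# For the multi-class case mentioned above (not needed here, shown only as a
# sketch), the TF1 call would be tf.nn.softmax_cross_entropy_with_logits_v2
# with one-hot labels, e.g.:
#   loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
#       labels=Y_onehot, logits=model))
# where Y_onehot is a hypothetical [None, num_classes] placeholder.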
loss_func = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=model, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss_func)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

for k in range(num_iter):
    tmp_cost, _ = sess.run([loss_func, optimizer], feed_dict={X: x, Y: y})
    if k % display_step == 0:
        #print('output: ', sess.run(model, feed_dict={X: x}))
        print('loss= ' + "{:.5f}".format(tmp_cost))

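# Sanity check (an added sketch, not part of the original paste): after
# training, the sigmoid of the logits should round to the XOR targets
# [0, 1, 1, 0] for the four inputs.
print('predictions:', sess.run(tf.round(tf.nn.sigmoid(model)), feed_dict={X: x}).T)
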
# each hidden unit defines a line that separates the input space
W = np.squeeze(sess.run(weights['w_h1'])) # 2x2
b = np.squeeze(sess.run(biases['b_h1'])) # 2,

sess.close()

#%%
# Now plot the fitted lines; two points are enough to draw each line.
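# Hidden unit i has the boundary W[0, i]*x1 + W[1, i]*x2 + b[i] = 0; solving
# for x2 gives the plotted line x2 = -(W[0, i]*x1 + b[i]) / W[1, i].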
plot_x = np.array([np.min(x[:, 0]) - 0.2, np.max(x[:, 0]) + 0.2]) # x1 range plus a margin
plot_y = -1 / W[1, 0] * (W[0, 0] * plot_x + b[0])  # boundary of hidden unit 0

plot_y2 = -1 / W[1, 1] * (W[0, 1] * plot_x + b[1]) # boundary of hidden unit 1

  80.  
  81. # plt.scatter(x[:, 0], x[:, 1], c=y, s=100, cmap='viridis')
  82. plt.plot(plot_x, plot_y) # line 1
  83. plt.plot(plot_x, plot_y2) # line 2
  84. plt.xlim([-0.2, 1.2]); plt.ylim([-0.2, 1.25]);
  85. #plt.text(0.425, 1.05, 'XOR', fontsize=14)
  86. plt.xticks([0.0, 0.5, 1.0]); plt.yticks([0.0, 0.5, 1.0])
  87. plt.show()
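# The two plotted lines are the decision boundaries of the two hidden units;
# together they partition the plane into the regions the output unit combines
# to realize XOR.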