Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def forward_propagate(X, parameters):
    """Run the CONV -> RELU -> MAXPOOL -> CONV -> RELU -> MAXPOOL -> FC graph.

    Args:
        X: input image batch tensor/placeholder (NHWC layout, as required by
           tf.nn.conv2d with the default data format).
        parameters: dict holding the convolution filter weights under keys
           "W1" and "W2".

    Returns:
        The (batch, 10) logits tensor — no activation is applied on the final
        fully-connected layer so the loss can apply softmax itself.
    """
    filt1 = parameters["W1"]
    filt2 = parameters["W2"]

    # Block 1: stride-1 convolution, ReLU, then 4x4 max-pool with stride 4.
    conv1 = tf.nn.conv2d(X, filt1, strides=[1, 1, 1, 1], padding="SAME")
    act1 = tf.nn.relu(conv1)
    pool1 = tf.nn.max_pool(act1, ksize=[1, 4, 4, 1],
                           strides=[1, 4, 4, 1], padding="SAME")

    # Block 2: same structure applied to the pooled output.
    conv2 = tf.nn.conv2d(pool1, filt2, strides=[1, 1, 1, 1], padding="SAME")
    act2 = tf.nn.relu(conv2)
    pool2 = tf.nn.max_pool(act2, ksize=[1, 4, 4, 1],
                           strides=[1, 4, 4, 1], padding="SAME")

    # Flatten the feature maps and project to 10 raw class scores.
    flattened = tf.contrib.layers.flatten(pool2)
    return tf.contrib.layers.fully_connected(flattened, 10, activation_fn=None)
def model_evaluate(trainX, trainY, testX, testY, predictX,
                   alpha=0.01, mini_batch=64, num_epochs=50, print_cost=True):
    """Build, train, and evaluate the two-conv-layer CNN, then predict labels.

    Args:
        trainX: training images, shape (m, n_H0, n_W0, n_C0).
        trainY: one-hot training labels, shape (m, n_y).
        testX, testY: held-out set evaluated with the same accuracy op.
        predictX: unlabeled images to produce argmax class predictions for.
        alpha: Adam learning rate.
        mini_batch: mini-batch size fed to random_mini_batches.
        num_epochs: number of passes over the training set.
        print_cost: when True, print the cost every 5 epochs and record it
            every epoch for the matplotlib cost curve.

    Returns:
        (train_accuracy, test_accuracy, parameters, test_prediction) where
        test_prediction holds the predicted class indices for predictX.
    """
    ops.reset_default_graph()      # start from a clean graph on repeated calls
    tf.set_random_seed(1)
    seed = 3                       # re-seeded per epoch for reproducible shuffles
    (m, n_H0, n_W0, n_C0) = trainX.shape
    n_y = trainY.shape[1]
    costs = []

    X, Y = createPlaceholders(n_H0, n_W0, n_C0, n_y)
    parameters = initialize_parameters()

    # Build the forward graph ONCE. The original called forward_propagate a
    # second time for `pred`, duplicating every op in the graph; the same Z3
    # logits serve training, evaluation, and prediction.
    Z3 = forward_propagate(X, parameters)
    cost = compute_cost(Z3, Y)
    optimizer = tf.train.AdamOptimizer(learning_rate=alpha).minimize(cost)
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(num_epochs):
            minibatch_cost = 0.
            seed = seed + 1
            minibatches = random_mini_batches(trainX, trainY, mini_batch, seed)
            # Average over the batches actually produced. int(m / mini_batch)
            # undercounted whenever m is not a multiple of mini_batch, skewing
            # the reported average cost. Assumes random_mini_batches returns a
            # sequence (it is iterated below) — TODO confirm against helper.
            num_minibatches = len(minibatches)
            for (minibatch_X, minibatch_Y) in minibatches:
                _, temp_cost = sess.run([optimizer, cost],
                                        feed_dict={X: minibatch_X,
                                                   Y: minibatch_Y})
                minibatch_cost += temp_cost / num_minibatches
            if print_cost and epoch % 5 == 0:
                print("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost:             # original `epoch % 1 == 0` was always true
                costs.append(minibatch_cost)

        # Cost curve over epochs.
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(alpha))
        plt.show()

        # Accuracy: fraction of examples whose argmax logit matches the label.
        predict_op = tf.argmax(Z3, 1)
        correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        # (Dropped the original `print(accuracy)`: it printed the Tensor
        # object's repr, not a value.)
        train_accuracy = accuracy.eval({X: trainX, Y: trainY})
        test_accuracy = accuracy.eval({X: testX, Y: testY})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)

        # Predict classes for the unseen data with the same argmax op.
        test_prediction = predict_op.eval(feed_dict={X: predictX})
        return train_accuracy, test_accuracy, parameters, test_prediction
- _, _, parameters,predictions= model_evaluate(trainx,trainy,testx,testy,test,alpha= 0.001,mini_batch = 64,num_epochs=40,print_cost=True)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement