Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Load preprocessed skin-image data from HDF5, split it into
# train/validation/test sets, one-hot encode the labels, and declare the
# graph input placeholders for the CNN defined below.
import tensorflow as tf
import h5py
import numpy as np
from sklearn.model_selection import train_test_split

# NOTE(review): images appear to be 128x270 single-channel (see the
# placeholder shape below) -- confirm against the preprocessing script.
with h5py.File('preprocessed_skin_img_orig.hdf5', 'r') as hf:
    img_data = hf['preprocessed_skin_img_data'][:]
    label_data = hf["preprocessed_skin_img_label"][:]

# 67/33 train/test split, then a further 67/33 split of the training
# portion into train/validation; random_state fixed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(img_data, label_data, test_size=0.33, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.33, random_state=42)
#img_data_arr_resh = np.reshape(img_data, [29839,34560])

# Binary classification: one-hot encode labels to depth 2. These are graph
# tensors, not arrays; they are materialized with sess.run() before training.
onehot_label_data_train = tf.one_hot(y_train, 2)
onehot_label_data_valid = tf.one_hot(y_valid, 2)
onehot_label_data_test = tf.one_hot(y_test, 2)
#label_data_arr_resh = np.reshape(label_data_as_array, [29839,1])

# Graph inputs: x holds a batch of 128x270 grayscale images, y_ the
# matching one-hot labels.
x = tf.placeholder(tf.float32, shape=[None,128,270,1])
y_ = tf.placeholder(tf.float32, shape=[None, 2])
#x_image = tf.reshape(x, [-1, 128, 270, 1])
def weight_variable(shape):
    """Return a trainable weight Variable of the given shape.

    Weights are drawn from a truncated normal distribution with a small
    standard deviation (0.1) to break symmetry between units.
    """
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Return a trainable bias Variable of the given shape.

    Biases start at a small positive constant (0.1) so ReLU units begin
    in their active region.
    """
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of x with kernel W, stride 1, SAME padding.

    SAME padding keeps the spatial dimensions of the output equal to the
    input's.
    """
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding.

    Halves each spatial dimension (odd sizes round up under SAME padding).
    """
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# --- Network: two conv+pool stages, one fully connected hidden layer with
# --- dropout, and a 2-way linear output layer.

# Conv layer 1: 5x5 kernels, 1 input channel -> 32 feature maps.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
#x_image = tf.reshape(img_data, [-1, 128, 270, 1])
h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)  # 128x270 -> 64x135

# Conv layer 2: 5x5 kernels, 32 -> 64 feature maps.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)  # 64x135 -> 32x68 (SAME padding rounds 67.5 up)

# Fully connected layer: flatten the 32*68*64 pooled activations -> 1024 units.
W_fc1 = weight_variable([32*68*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 32*68*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout on the hidden layer; keep_prob is fed at run time
# (0.5 during training, 1.0 for evaluation -- see the session loop below).
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Output layer: 1024 -> 2 raw logits (softmax is applied inside the loss).
W_fc2 = weight_variable([1024, 2])
b_fc2 = bias_variable([2])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

# Loss: mean softmax cross-entropy over the batch; optimizer: Adam, lr 1e-4.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Accuracy: fraction of batch samples whose argmax logit matches the label.
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train for EPOCHS passes of mini-batch SGD, report validation accuracy per
# epoch, then report accuracy on the held-out test set.
#
# Fix: the original loops used hard-coded dataset sizes (13394, 6597, 9846)
# as range bounds, which were already off-by-one relative to the actual
# split sizes and would silently drop or over-run data if the dataset ever
# changed. Bounds are now derived from the arrays themselves; for the
# recorded data the batch start indices are identical, so behavior is
# unchanged.
BATCH_SIZE = 128
EPOCHS = 10

with tf.Session() as sess:
    # Materialize the one-hot label tensors into numpy arrays so they can
    # be sliced and passed through feed_dict below.
    onehot_label_data_train = sess.run(onehot_label_data_train)
    onehot_label_data_valid = sess.run(onehot_label_data_valid)
    onehot_label_data_test = sess.run(onehot_label_data_test)

    sess.run(tf.global_variables_initializer())
    print('Training phase:\n')
    for j in range(EPOCHS):
        # One full pass over the training set with dropout enabled.
        for i in range(0, len(X_train), BATCH_SIZE):
            train_step.run(feed_dict={x: X_train[i:i + BATCH_SIZE],
                                      y_: onehot_label_data_train[i:i + BATCH_SIZE],
                                      keep_prob: 0.5})

        # Evaluate on the validation set with dropout disabled.
        # NOTE: this averages per-batch means, so a short final batch is
        # weighted the same as full batches -- a small approximation.
        batch_count = 0
        tot_valid_acc = 0
        for k in range(0, len(X_valid), BATCH_SIZE):
            batch_count += 1
            valid_accuracy = accuracy.eval(feed_dict={x: X_valid[k:k + BATCH_SIZE],
                                                      y_: onehot_label_data_valid[k:k + BATCH_SIZE],
                                                      keep_prob: 1.0})
            tot_valid_acc = tot_valid_acc + valid_accuracy
        avg_valid_acc = tot_valid_acc / batch_count
        print('epoch %d, validation accuracy %g' % (j, avg_valid_acc))

    # Final evaluation on the held-out test set.
    print('Testing:\n')
    batch_count = 0
    tot_test_acc = 0
    for i in range(0, len(X_test), BATCH_SIZE):
        batch_count += 1
        test_accuracy = accuracy.eval(feed_dict={x: X_test[i:i + BATCH_SIZE],
                                                 y_: onehot_label_data_test[i:i + BATCH_SIZE],
                                                 keep_prob: 1.0})
        tot_test_acc = tot_test_acc + test_accuracy
    avg_test_acc = tot_test_acc / batch_count
    print('Test accuracy %g' % (avg_test_acc))
- '''
- Training phase:
- epoch 0, validation accuracy 0.779632
- epoch 1, validation accuracy 0.892505
- epoch 2, validation accuracy 0.937702
- epoch 3, validation accuracy 0.960538
- epoch 4, validation accuracy 0.970403
- epoch 5, validation accuracy 0.985753
- epoch 6, validation accuracy 0.991136
- epoch 7, validation accuracy 0.995042
- epoch 8, validation accuracy 0.997897
- epoch 9, validation accuracy 0.998197
- Testing:
- Test accuracy 0.998072
- '''
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement