# Builds a TensorFlow 1.x graph that maps 128x128 grayscale images
# to 512-dimensional descriptors.
import numpy as np
import tensorflow as tf

image_size = 128
num_channels = 1
batch = 128
descriptor_size = 512

def weight_variable(shape):
    # Weights drawn from a truncated normal to break symmetry.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # Small positive bias keeps ReLU units active early in training.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # Stride-1 convolution, zero-padded so the spatial size is preserved.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling halves the spatial resolution.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

#INPUT
xs = tf.placeholder(tf.float32, [None, image_size, image_size, num_channels])
#DROPOUT (currently disabled; re-enable together with the tf.cond branches below)
#is_training = tf.placeholder(tf.bool)
#CONV1
W_conv1 = weight_variable([3, 3, num_channels, 128])
b_conv1 = bias_variable([128])
h_conv1 = tf.nn.relu(conv2d(xs, W_conv1) + b_conv1)
#CONV2
W_conv2 = weight_variable([3, 3, 128, 128])
b_conv2 = bias_variable([128])
h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
#POOL1
h_pool1 = max_pool_2x2(h_conv2)
h_pool1_drop = h_pool1  # dropout disabled: tf.cond(is_training, lambda: tf.nn.dropout(h_pool1, keep_prob=0.9), lambda: h_pool1)
#CONV3
W_conv3 = weight_variable([3, 3, 128, 128])
b_conv3 = bias_variable([128])
h_conv3 = tf.nn.relu(conv2d(h_pool1_drop, W_conv3) + b_conv3)
#POOL2
h_pool2 = max_pool_2x2(h_conv3)
h_pool2_drop = h_pool2  # dropout disabled: tf.cond(is_training, lambda: tf.nn.dropout(h_pool2, keep_prob=0.8), lambda: h_pool2)
#LCONV1
W_lconv1 = weight_variable([3, 3, 128, 256])
b_lconv1 = bias_variable([256])
h_lconv1 = tf.nn.relu(conv2d(h_pool2_drop, W_lconv1) + b_lconv1)
#POOL3
h_pool3 = max_pool_2x2(h_lconv1)
h_pool3_drop = h_pool3  # dropout disabled: tf.cond(is_training, lambda: tf.nn.dropout(h_pool3, keep_prob=0.7), lambda: h_pool3)
#LCONV2
W_lconv2 = weight_variable([3, 3, 256, 256])
b_lconv2 = bias_variable([256])
h_lconv2 = tf.nn.relu(conv2d(h_pool3_drop, W_lconv2) + b_lconv2)
#POOL4
h_pool4 = max_pool_2x2(h_lconv2)
h_pool4_drop = h_pool4  # dropout disabled: tf.cond(is_training, lambda: tf.nn.dropout(h_pool4, keep_prob=0.6), lambda: h_pool4)
#LCONV3
W_lconv3 = weight_variable([3, 3, 256, 256])
b_lconv3 = bias_variable([256])
h_lconv3 = tf.nn.relu(conv2d(h_pool4_drop, W_lconv3) + b_lconv3)
#FLATTENING: concatenate the pooled trunk with the final conv branch
flat_h_pool4_drop = tf.reshape(h_pool4_drop, [-1, int(np.prod(h_pool4_drop.get_shape().as_list()[1:]))])
flat_h_lconv3 = tf.reshape(h_lconv3, [-1, int(np.prod(h_lconv3.get_shape().as_list()[1:]))])
flat_concat = tf.concat([flat_h_pool4_drop, flat_h_lconv3], 1)
#FC1: project the concatenated features down to the descriptor
W_fc1 = weight_variable([flat_concat.get_shape().as_list()[1], descriptor_size])
b_fc1 = bias_variable([descriptor_size])
h_fc1 = tf.nn.bias_add(tf.matmul(flat_concat, W_fc1), b_fc1)  # same as tf.matmul(flat_concat, W_fc1) + b_fc1
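# --- Usage sketch (not part of the original paste) ---
# A minimal example of how this TensorFlow 1.x graph could be run:
# initialise the variables, then feed one batch through the network to
# obtain descriptors. The random input below is purely illustrative and
# stands in for real image data.
if __name__ == '__main__':
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        dummy_images = np.random.rand(batch, image_size, image_size, num_channels).astype(np.float32)
        descriptors = sess.run(h_fc1, feed_dict={xs: dummy_images})
        print(descriptors.shape)  # expected: (128, 512)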