import tensorflow as tf


def inference(images, batch_size, n_classes):
    """
    Build the model.
    Args:
        images: image batch, 4D tensor, tf.float32, [batch_size, width, height, channels]
        batch_size: number of images per batch
        n_classes: number of output classes
    Returns:
        output tensor with the computed logits, float, [batch_size, n_classes]
    """
    # conv1, weight shape = [kernel height, kernel width, input channels, output channels]
    with tf.variable_scope('conv1') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 3, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # pool1 and norm1
    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm1')

    # conv2
    with tf.variable_scope('conv2') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 16, 32],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[32],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)

    # pool2 and norm2
    with tf.variable_scope('pooling2_lrn') as scope:
        pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling2')
        norm2 = tf.nn.lrn(pool2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm2')

    # conv3
    with tf.variable_scope('conv3') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 32, 64],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[64],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm2, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name=scope.name)
        # dropout with keep_prob=0.85; note it is applied unconditionally,
        # so it stays active at evaluation time as well
        conv3 = tf.nn.dropout(conv3, 0.85)

    # pool3 and norm3
    with tf.variable_scope('pooling3_lrn') as scope:
        pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling3')
        norm3 = tf.nn.lrn(pool3, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm3')

    # conv4
    with tf.variable_scope('conv4') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 64, 64],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[64],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm3, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(pre_activation, name=scope.name)

    # norm4 and pool4 (normalization before pooling here; stride 1, so no downsampling)
    with tf.variable_scope('pooling4_lrn') as scope:
        norm4 = tf.nn.lrn(conv4, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm4')
        pool4 = tf.nn.max_pool(norm4, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                               padding='SAME', name='pooling4')

    # local3: first fully connected layer on the flattened feature map
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool4, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # local4: second fully connected layer
    with tf.variable_scope('local4') as scope:
        weights = tf.get_variable('weights',
                                  shape=[128, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)

    # linear output layer; returns unscaled logits (the softmax itself is
    # applied inside the loss, so no activation here despite the scope name)
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.get_variable('softmax_linear',
                                  shape=[128, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)

    return softmax_linear
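
# Usage sketch (illustrative, not part of the original graph): with 64x64 RGB
# inputs and batch_size=16, pool1-pool3 each halve the spatial size, so the
# flattened dimension in local3 is 8*8*64 = 4096 and the logits come out as
# [16, n_classes]. The image size and batch size are assumptions:
#
#   images_ph = tf.placeholder(tf.float32, [16, 64, 64, 3])
#   logits = inference(images_ph, batch_size=16, n_classes=2)
#   print(logits.get_shape())  # (16, 2)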


def losses(logits, labels):
    """
    Compute loss from logits and labels.
    Args:
        logits: logits tensor, float, [batch_size, n_classes]
        labels: label tensor, tf.int32, [batch_size]
    Returns:
        loss tensor of float type
    """
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss
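
# Usage sketch (hypothetical placeholder names): labels must be integer class
# indices of shape [batch_size], not one-hot vectors, since the sparse
# cross-entropy op does the one-hot expansion internally:
#
#   labels_ph = tf.placeholder(tf.int32, [16])
#   loss = losses(logits, labels_ph)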


def training(loss, learning_rate):
    """
    Training op: the Op returned by this function is what must be passed to
    the 'sess.run()' call to cause the model to train.
    Args:
        loss: loss tensor, from losses()
        learning_rate: learning rate for the Adam optimizer
    Returns:
        train_op: the op for training
    """
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op
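
# Usage sketch: each run of the returned op applies one Adam update and
# increments global_step; the 1e-4 learning rate is an assumption:
#
#   train_op = training(loss, learning_rate=1e-4)
#   _ = sess.run(train_op, feed_dict={...})  # inside a tf.Session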


def evaluation(logits, labels):
    """
    Evaluate the quality of the logits at predicting the label.
    Args:
        logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size], with values in the
            range [0, NUM_CLASSES).
    Returns:
        A scalar float32 tensor with the fraction of examples (out of
        batch_size) that were predicted correctly.
    """
    with tf.variable_scope('accuracy') as scope:
        # fraction of examples where the true label is the top prediction
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float32)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
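

# Minimal end-to-end smoke test (a sketch, not part of the original module).
# The image size, batch size, class count, learning rate, and the random
# numpy batches are all assumptions chosen purely for illustration.
if __name__ == '__main__':
    import numpy as np

    BATCH_SIZE, IMG_SIZE, N_CLASSES = 16, 64, 2
    images_ph = tf.placeholder(tf.float32, [BATCH_SIZE, IMG_SIZE, IMG_SIZE, 3])
    labels_ph = tf.placeholder(tf.int32, [BATCH_SIZE])

    logits = inference(images_ph, BATCH_SIZE, N_CLASSES)
    loss = losses(logits, labels_ph)
    train_op = training(loss, learning_rate=1e-4)
    accuracy = evaluation(logits, labels_ph)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(3):  # a few steps on random data, just to exercise the graph
            batch_images = np.random.rand(BATCH_SIZE, IMG_SIZE, IMG_SIZE, 3).astype(np.float32)
            batch_labels = np.random.randint(0, N_CLASSES, size=BATCH_SIZE).astype(np.int32)
            _, loss_val, acc_val = sess.run([train_op, loss, accuracy],
                                            feed_dict={images_ph: batch_images,
                                                       labels_ph: batch_labels})
            print('step %d: loss = %.4f, accuracy = %.2f' % (step, loss_val, acc_val))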