
cifar10.py

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Builds the CIFAR-10 network.

Summary of available functions:

# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()

# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)

# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)

# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import re

import tensorflow as tf

import cifar10_input


FLAGS = tf.compat.v1.flags.FLAGS

# Basic model parameters.
tf.compat.v1.flags.DEFINE_integer('batch_size', 128,
                                  """Number of images to process in a batch.""")
tf.compat.v1.flags.DEFINE_boolean('use_fp16', False,
                                  """Train the model using fp16.""")

# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL


# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.

# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'


def _activation_summary(x):
  """Helper to create summaries for activations.

  Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.

  Args:
    x: Tensor
  Returns:
    nothing
  """
  # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on tensorboard.
  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  # Use the compat.v1 summary ops: this file builds a v1-style graph, and the
  # TF2 tf.summary ops would require an explicit step and summary writer.
  tf.compat.v1.summary.histogram(tensor_name + '/activations', x)
  tf.compat.v1.summary.scalar(tensor_name + '/sparsity',
                              tf.math.zero_fraction(x))


def _variable_on_cpu(name, shape, initializer):
  """Helper to create a Variable stored on CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor
  """
  with tf.device('/cpu:0'):
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = tf.compat.v1.get_variable(name, shape, initializer=initializer,
                                    dtype=dtype)
  return var


def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.compat.v1.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.compat.v1.add_to_collection('losses', weight_decay)
  return var
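
# A worked note on the weight-decay term above (added sketch, not in the
# original file): tf.nn.l2_loss(var) returns sum(var ** 2) / 2, so a call
# with a hypothetical wd=0.004 contributes
#
#   weight_decay = 0.004 * sum(var ** 2) / 2
#
# to the 'losses' collection, which loss() below folds into the total loss.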


def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  images, labels = cifar10_input.distorted_inputs(batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels


def inputs(eval_data):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  images, labels = cifar10_input.inputs(eval_data=eval_data,
                                        batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels


def inference(images):
  """Build the CIFAR-10 model.

  Args:
    images: Images returned from distorted_inputs() or inputs().

  Returns:
    Logits.
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training
  # runs. If we only ran this model on a single GPU, we could simplify this
  # function by replacing all instances of tf.get_variable() with tf.Variable().
  #
  # conv1
  with tf.compat.v1.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 3, 64],
                                         stddev=5e-2,
                                         wd=None)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv1)

  # pool1
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  # norm1
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')
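  # Added note (sketch): tf.nn.lrn implements local response normalization,
  #   output = input / (bias + alpha * sqr_sum) ** beta
  # where sqr_sum at depth d sums input ** 2 over the 2*4+1 = 9 neighboring
  # channels (depth_radius=4), which is why alpha is written as 0.001 / 9.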

  # conv2
  with tf.compat.v1.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 64],
                                         stddev=5e-2,
                                         wd=None)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv2)

  # norm2
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')

  # local3
  with tf.compat.v1.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.keras.layers.Flatten()(pool2)
    dim = reshape.get_shape()[1]
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    _activation_summary(local3)

  # local4
  with tf.compat.v1.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
    _activation_summary(local4)

  # linear layer (WX + b).
  # We don't apply softmax here because
  # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
  # and performs the softmax internally for efficiency.
  with tf.compat.v1.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1/192.0, wd=None)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    softmax_linear = tf.math.add(tf.matmul(local4, weights), biases,
                                 name=scope.name)
    _activation_summary(softmax_linear)

  return softmax_linear
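
# Shape walkthrough for inference() (added note, assuming the usual 24x24
# distorted crops produced by cifar10_input, i.e. IMAGE_SIZE == 24):
#   images: [batch, 24, 24, 3]
#   conv1:  [batch, 24, 24, 64]  (5x5 conv, stride 1, SAME padding)
#   pool1:  [batch, 12, 12, 64]  (3x3 max pool, stride 2)
#   conv2:  [batch, 12, 12, 64]
#   pool2:  [batch, 6, 6, 64]
#   local3: [batch, 384]         (flattened from 6 * 6 * 64 = 2304)
#   local4: [batch, 192]
#   logits: [batch, NUM_CLASSES]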


def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.compat.v1.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.compat.v1.get_collection('losses'), name='total_loss')
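
# For reference (added note): for each example i, the sparse softmax cross
# entropy above reduces to
#   cross_entropy[i] = -log(softmax(logits[i])[labels[i]])
# where labels[i] is an integer class index in [0, NUM_CLASSES).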


def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.compat.v1.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.compat.v1.summary.scalar(l.op.name + ' (raw)', l)
    tf.compat.v1.summary.scalar(l.op.name, loss_averages.average(l))

  return loss_averages_op
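
# For reference (added note): ExponentialMovingAverage keeps a shadow copy of
# each value it is applied to, updated on every step as
#   shadow = decay * shadow + (1 - decay) * value
# so with decay=0.9 above, the summaries track a smoothed loss rather than
# the noisy per-batch value.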


def train(total_loss, global_step):
  """Train CIFAR-10 model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.
  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.compat.v1.train.exponential_decay(INITIAL_LEARNING_RATE,
                                            global_step,
                                            decay_steps,
                                            LEARNING_RATE_DECAY_FACTOR,
                                            staircase=True)
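  # With staircase=True, exponential_decay computes (for reference)
  #   lr = INITIAL_LEARNING_RATE *
  #        LEARNING_RATE_DECAY_FACTOR ** (global_step // decay_steps)
  # i.e. the learning rate drops by 10x every NUM_EPOCHS_PER_DECAY epochs.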
  tf.compat.v1.summary.scalar('learning_rate', lr)

  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)

  # Compute gradients.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.compat.v1.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)

  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  # Add histograms for trainable variables.
  for var in tf.compat.v1.trainable_variables():
    tf.compat.v1.summary.histogram(var.op.name, var)

  # Add histograms for gradients.
  for grad, var in grads:
    if grad is not None:
      tf.compat.v1.summary.histogram(var.op.name + '/gradients', grad)

  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  with tf.control_dependencies([apply_gradient_op]):
    variables_averages_op = variable_averages.apply(
        tf.compat.v1.trainable_variables())

  return variables_averages_op
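

# A minimal usage sketch (hypothetical driver, not part of the original
# module), wiring the functions above together as the module docstring
# describes. Assumes a v1-style graph session via tf.compat.v1:
#
#   tf.compat.v1.disable_eager_execution()
#   global_step = tf.compat.v1.train.get_or_create_global_step()
#   images, labels = distorted_inputs()
#   logits = inference(images)
#   total_loss = loss(logits, labels)
#   train_op = train(total_loss, global_step)
#   with tf.compat.v1.train.MonitoredTrainingSession() as sess:
#     while not sess.should_stop():
#       sess.run(train_op)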