#!/usr/bin/env python3
import numpy as np
import tensorflow as tf

class Network:
    WIDTH = 28
    HEIGHT = 28
    LABELS = 10

    def __init__(self, threads, seed=42):
        # Create an empty graph and a session
        graph = tf.Graph()
        graph.seed = seed
        self.session = tf.Session(graph=graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                                     intra_op_parallelism_threads=threads))

    def construct(self, args):
        with self.session.graph.as_default():
            # Inputs
            self.images = tf.placeholder(tf.float32, [None, self.WIDTH, self.HEIGHT, 1], name="images")
            self.labels = tf.placeholder(tf.int64, [None], name="labels")

            # Computation
            flattened_images = tf.layers.flatten(self.images, name="flatten")
            hidden_layer = tf.layers.dense(flattened_images, args.hidden_layer, activation=tf.nn.relu, name="hidden_layer")
            output_layer = tf.layers.dense(hidden_layer, self.LABELS, activation=None, name="output_layer")
            self.predictions = tf.argmax(output_layer, axis=1)
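            # The prediction is the class with the largest logit; softmax is
            # monotonic, so it need not be applied before the argmax.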

            # Training
            loss = tf.losses.sparse_softmax_cross_entropy(self.labels, output_layer, scope="loss")
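            # `sparse_softmax_cross_entropy` applies softmax to the logits
            # internally and computes cross-entropy against the integer labels,
            # which is why `output_layer` above has no activation.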
            global_step = tf.train.create_global_step()

            # Create `optimizer` according to arguments ("SGD", "SGD" with momentum,
            # or "Adam"), using the learning rate given by args.learning_rate and,
            # optionally, args.learning_rate_final.
            if args.learning_rate_final is not None:
                # Decay the learning rate exponentially once per epoch, from
                # args.learning_rate down to args.learning_rate_final; the epoch
                # counter is advanced manually in `evaluate`.
                step_counter = tf.Variable(0, name="step_counter", trainable=False, dtype=tf.int32)
                self.increment_counter = tf.assign_add(step_counter, 1)
                learning_rate = tf.train.exponential_decay(
                    args.learning_rate,
                    step_counter,
                    1,
                    (args.learning_rate_final / args.learning_rate) ** (1 / (args.epochs - 1)),
                    staircase=True)
            else:
                learning_rate = args.learning_rate
                # Keep `increment_counter` defined so `evaluate` can always run it.
                self.increment_counter = tf.no_op(name="increment_counter")
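            # For example, with the default learning_rate=0.01 and, say, a
            # hypothetical --learning_rate_final=0.001 over 20 epochs, the
            # per-epoch factor is (0.001 / 0.01) ** (1 / 19) ~= 0.886, so the
            # rate reaches exactly 0.001 at the start of the last epoch.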

            if args.optimizer.lower() == "adam":
                optimizer = tf.train.AdamOptimizer(learning_rate)
            elif args.momentum is not None:
                optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=args.momentum)
            else:
                optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            self.training = optimizer.minimize(loss, global_step=global_step, name="training")
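            # `minimize` also increments `global_step` once per training batch;
            # the summary code below uses it to decide when train summaries are
            # recorded.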

            # Summaries
            accuracy = tf.reduce_mean(tf.cast(tf.equal(self.labels, self.predictions), tf.float32))
            self.accuracy = accuracy
            summary_writer = tf.contrib.summary.create_file_writer(args.logdir, flush_millis=10 * 1000)
            self.summaries = {}
            with summary_writer.as_default(), tf.contrib.summary.record_summaries_every_n_global_steps(100):
                self.summaries["train"] = [tf.contrib.summary.scalar("train/loss", loss),
                                           tf.contrib.summary.scalar("train/accuracy", accuracy)]
            with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
                for dataset in ["dev", "test"]:
                    self.summaries[dataset] = tf.contrib.summary.scalar(dataset + "/accuracy", accuracy)

            # Initialize variables
            self.session.run(tf.global_variables_initializer())
            with summary_writer.as_default():
                tf.contrib.summary.initialize(session=self.session, graph=self.session.graph)
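            # `tf.contrib.summary.initialize` finalizes the summary-writer setup;
            # passing `graph` additionally records the graph so TensorBoard can
            # display it.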

    def train(self, images, labels):
        self.session.run([self.training, self.summaries["train"]],
                         {self.images: images, self.labels: labels})

    def evaluate(self, dataset, images, labels):
        # Advance the learning-rate schedule once per epoch, when evaluating the
        # "dev" set (this is a no-op unless --learning_rate_final is given).
        if dataset == "dev":
            self.session.run(self.increment_counter)
        accuracy, _ = self.session.run([self.accuracy, self.summaries[dataset]],
                                       {self.images: images, self.labels: labels})
        return accuracy


if __name__ == "__main__":
    import argparse
    import datetime
    import os
    import re

    # Fix random seed
    np.random.seed(42)

    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", default=50, type=int, help="Batch size.")
    parser.add_argument("--epochs", default=20, type=int, help="Number of epochs.")
    parser.add_argument("--hidden_layer", default=200, type=int, help="Size of the hidden layer.")
    parser.add_argument("--learning_rate", default=0.01, type=float, help="Initial learning rate.")
    parser.add_argument("--learning_rate_final", default=None, type=float, help="Final learning rate.")
    parser.add_argument("--momentum", default=None, type=float, help="Momentum.")
    parser.add_argument("--optimizer", default="SGD", type=str, help="Optimizer to use.")
    parser.add_argument("--threads", default=1, type=int, help="Maximum number of threads to use.")
    args = parser.parse_args()

    # Create logdir name
    args.logdir = "logs/{}-{}-{}".format(
        os.path.basename(__file__),
        datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"),
        ",".join(("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value) for key, value in sorted(vars(args).items())))
    )
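    # The re.sub above abbreviates each argument name to the initial letters of
    # its words (e.g. batch_size -> bs, learning_rate_final -> lrf), so the
    # directory name carries a compact, sorted summary of the configuration.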
    if not os.path.exists("logs"): os.mkdir("logs") # TF 1.6 will do this by itself

    # Load the data
    from tensorflow.examples.tutorials import mnist
    # Note that because of current ReCodEx limitations, the MNIST dataset must
    # be read from the directory of the script -- that is why the "mnist_data/"
    # from `mnist_example.py` has been changed to the current directory ".".
    #
    # Additionally, loading the dataset prints to stdout -- this loading message
    # is part of the expected output when evaluating on ReCodEx.
    mnist = mnist.input_data.read_data_sets(".", reshape=False, seed=42)
    batches_per_epoch = mnist.train.num_examples // args.batch_size
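    # Integer division: examples that do not fill a complete final batch are
    # skipped, so each epoch processes exactly batches_per_epoch batches.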

    # Construct the network
    network = Network(threads=args.threads)
    network.construct(args)

    # Train
    for i in range(args.epochs):
        for b in range(batches_per_epoch):
            images, labels = mnist.train.next_batch(args.batch_size)
            network.train(images, labels)

        network.evaluate("dev", mnist.validation.images, mnist.validation.labels)
        accuracy = network.evaluate("test", mnist.test.images, mnist.test.labels)

    # Print the last epoch's test-set accuracy as a percentage rounded to two
    # decimal places.
    print("{:.2f}".format(100 * accuracy))
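
# Example invocation exercising the learning-rate decay (the file name is
# hypothetical -- substitute whatever this script is saved as):
#   python3 mnist_mlp.py --optimizer=SGD --momentum=0.9 \
#       --learning_rate=0.01 --learning_rate_final=0.001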