Untitled

a guest
Jul 27th, 2017
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json
import os
import shutil

import grpc
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.python import debug as tf_debug

tf.logging.set_verbosity(tf.logging.ERROR)

### Data - MNIST

mnist = learn.datasets.load_dataset('mnist')
train_data = mnist.train.images
train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
eval_data = mnist.test.images
eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

BATCH_SIZE = 100
NUM_EPOCHS = 10
train_input_fn = learn.io.numpy_input_fn({'x': train_data}, train_labels, shuffle=True,
                                         batch_size=BATCH_SIZE, num_epochs=NUM_EPOCHS)

batch_size = 100
num_epochs = 1
eval_input_fn = learn.io.numpy_input_fn({'x': eval_data}, eval_labels, shuffle=False,
                                        batch_size=batch_size, num_epochs=num_epochs)
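# numpy_input_fn wraps the in-memory arrays in a batched (and, for training,
# shuffled) input pipeline; the eval pipeline makes a single unshuffled pass.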

### Cluster

my_cluster = {'ps': ['/cpu:0'],
              'worker': ['/gpu:0']}
os.environ['TF_CONFIG'] = json.dumps(
    {'cluster': my_cluster,
     'task': {'type': 'worker', 'index': 1}})
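# Note: TF_CONFIG normally lists one host:port address per task for each job
# (e.g. {'ps': ['host1:2222'], 'worker': ['host2:2222']}), and 'index' selects
# this process's entry in its job's address list.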

my_configs = learn.RunConfig()

server = tf.train.Server(server_or_cluster_def=my_configs.cluster_spec, job_name='worker')
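# The Server starts a gRPC endpoint for this task so the other jobs in the
# cluster spec can reach it; RunConfig() picked the spec up from TF_CONFIG.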

### Model

def cnn_model_fn(features, labels, mode):

    input_layer = tf.reshape(features['x'], shape=[-1, 28, 28, 1])

    # conv1
    conv1 = tf.layers.conv2d(inputs=input_layer,
                             filters=32,
                             kernel_size=[5, 5],
                             padding='same',
                             activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # conv2
    conv2 = tf.layers.conv2d(inputs=pool1,
                             filters=64,
                             kernel_size=[5, 5],
                             padding='same',
                             activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # fully connected layers
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense1 = tf.layers.dense(pool2_flat, 1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(inputs=dense1, rate=0.4, training=mode == learn.ModeKeys.TRAIN)

    # fc2
    logits = tf.layers.dense(dropout, 10, activation=tf.nn.relu)
    loss = None
    train_op = None

    # loss
    if mode != learn.ModeKeys.INFER:
        onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
        loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)

    # optimizer
    if mode == learn.ModeKeys.TRAIN:
        with tf.device("/job:worker/task:1"):
            train_op = tf.contrib.layers.optimize_loss(
                loss=loss,
                global_step=tf.contrib.framework.get_global_step(),
                learning_rate=0.0001,
                optimizer="Adam")

    # predictions
    predictions = {
        'classes': tf.argmax(logits, axis=1),
        'predictions': tf.nn.softmax(logits, name="softmax_tensor")
    }
    return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions, loss=loss, train_op=train_op)
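# ModelFnOps packages predictions, loss and train_op so the Estimator can run
# the same model_fn in TRAIN, EVAL and INFER modes.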

classifier = learn.Estimator(model_fn=cnn_model_fn, model_dir="/tmp/mnist_distributed", config=my_configs)

### Logging

tensors_to_log = {"probabilities": "softmax_tensor"}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
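# The hook logs the "softmax_tensor" values every 50 steps, but it only takes
# effect if it is attached to training (e.g. via the Experiment's
# train_monitors argument, which is commented out below).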

### Metrics

metrics = {
    "accuracy":
        learn.MetricSpec(
            metric_fn=tf.metrics.accuracy, prediction_key="classes"),
}

### Distributing training

distributed_experiment = learn.Experiment(estimator=classifier,
                                          train_input_fn=train_input_fn,
                                          eval_input_fn=eval_input_fn,
                                          eval_metrics=metrics,
                                          # train_monitors=my_monitors,
                                          train_steps=200,
                                          )

distributed_experiment.train_and_evaluate()
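# train_and_evaluate() runs training for train_steps on this worker and
# interleaves evaluation passes using eval_input_fn and the metrics above.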

from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config

...

learn_runner.run(
    experiment_fn=create_experiment_fn(config),
    output_dir=output_dir)
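
# A rough sketch of what create_experiment_fn could look like in this pattern
# (the name, config argument and values below are illustrative assumptions, not
# part of the original paste): learn_runner.run calls the returned
# experiment_fn with output_dir and expects a learn.Experiment back.
def create_experiment_fn(config):
    def _experiment_fn(output_dir):
        # Build the estimator against the runner-provided output_dir
        # (illustrative; reuses the model_fn, input fns and metrics above).
        estimator = learn.Estimator(model_fn=cnn_model_fn,
                                    model_dir=output_dir,
                                    config=config)
        return learn.Experiment(estimator=estimator,
                                train_input_fn=train_input_fn,
                                eval_input_fn=eval_input_fn,
                                eval_metrics=metrics,
                                train_steps=200)
    return _experiment_fn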