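# Optuna pruning example for tf.estimator (TensorFlow 1.x, tf.contrib APIs):
# a SessionRunHook reports intermediate evaluation metrics to the Optuna trial
# and raises TrialPruned when the pruner decides the trial is unpromising.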
import tensorflow as tf
import optuna
import sklearn.datasets
from sklearn.model_selection import train_test_split


class OptunaPruningHook(tf.train.SessionRunHook):

    def __init__(self, trial, estimator, metrics_name, is_higher_better):
        self.trial = trial
        self.estimator = estimator
        self.current_step = -1
        self.metrics_name = metrics_name
        self.is_higher_better = is_higher_better

    def after_run(self, run_context, run_values):
        eval_metrics = tf.contrib.estimator.read_eval_metrics(self.estimator.eval_dir())
        if eval_metrics:
            step = next(reversed(eval_metrics))
            latest_eval_metrics = eval_metrics[step]
            # Only report when a new evaluation summary is available
            if step > self.current_step:
                # Report on the same "smaller is better" scale as the objective
                # (which returns 1 - accuracy), so the pruner compares like with like.
                if self.is_higher_better:
                    current_score = 1.0 - latest_eval_metrics[self.metrics_name]
                else:
                    current_score = latest_eval_metrics[self.metrics_name]
                self.trial.report(current_score, step=step)
                self.current_step = step
                if self.trial.should_prune(self.current_step):
                    message = "Trial was pruned at iteration {}.".format(self.current_step)
                    raise optuna.structs.TrialPruned(message)


def create_input_fn():
    iris = sklearn.datasets.load_iris()
    x, y = iris.data, iris.target
    x_train, x_eval, y_train, y_eval = train_test_split(x, y, test_size=0.5, random_state=42)

    def _train_input_fn():
        dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
        dataset = dataset.shuffle(128).repeat().batch(16)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
        return {"x": features}, labels

    def _eval_input_fn():
        dataset = tf.data.Dataset.from_tensor_slices((x_eval, y_eval))
        dataset = dataset.batch(32)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
        return {"x": features}, labels

    return _train_input_fn, _eval_input_fn


def objective(trial):
    save_steps = 50
    # Create input functions for train and eval
    train_input_fn, eval_input_fn = create_input_fn()
    # Hyperparameter to be tuned with Optuna
    learning_rate = trial.suggest_loguniform("learning_rate", 1e-5, 1e-1)
    # Create Estimator config
    config = tf.estimator.RunConfig(save_summary_steps=save_steps, save_checkpoints_steps=save_steps)
    # Create Estimator
    clf = tf.estimator.DNNClassifier(
        feature_columns=[tf.feature_column.numeric_column(key="x", shape=[4])],
        n_classes=3,
        hidden_units=[],
        optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate),
        model_dir="outputs_pruning/lr_{}".format(learning_rate),
        config=config
    )
    # Create hooks: early stopping uses "no increase" because accuracy is higher-is-better
    early_stopping_hook = tf.contrib.estimator.stop_if_no_increase_hook(clf, "accuracy", save_steps)
    optuna_pruning_hook = OptunaPruningHook(trial=trial, estimator=clf, metrics_name="accuracy", is_higher_better=True)
    hooks = [early_stopping_hook, optuna_pruning_hook]
    # Create TrainSpec and EvalSpec
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=500, hooks=hooks)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=1000, start_delay_secs=0, throttle_secs=0)
    # Run training and evaluation, then score the final model
    tf.estimator.train_and_evaluate(clf, train_spec, eval_spec)
    result = clf.evaluate(input_fn=eval_input_fn, steps=100)
    accuracy = result["accuracy"]
    # Optuna minimizes the objective, so return 1 - accuracy
    return 1.0 - accuracy


if __name__ == "__main__":
    study = optuna.create_study(pruner=optuna.pruners.MedianPruner(n_warmup_steps=100))
    study.optimize(objective, n_trials=20)
    print(study.best_trial)
    print([t.state for t in study.trials])
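# study.best_trial shows the trial that achieved the lowest objective value
# (1 - accuracy); the final print lists each trial's state, so pruned trials
# appear as PRUNED rather than COMPLETE.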