import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # silence TensorFlow's C++ info/warning logs

import tensorflow as tf
import numpy as np

## TODO
## - Move learning rate
## - Save model, restore model


class NNModel():
    """A small fully connected classifier wrapped in a TF 1.x Estimator."""

    def __init__(self, layers, num_classes):
        print("In NNModel constructor")
        self.layers = layers            # hidden layer sizes, e.g. [64, 32]
        self.num_classes = num_classes
        self.learning_rate = 0.1
        # Passing model_dir to the Estimator would enable the checkpoint
        # save/restore noted in the TODO above.
        self.model = tf.estimator.Estimator(self.build)
    def build_layers(self, features):
        # Stack the hidden layers. Without a nonlinearity the stacked dense
        # layers would collapse into a single linear map, so apply ReLU.
        tmp = features["x"]
        for units in self.layers:
            tmp = tf.layers.dense(tmp, units, activation=tf.nn.relu)
        # Final linear layer: one logit per class.
        return tf.layers.dense(tmp, self.num_classes)
    def train(self, input_fn, num_steps):
        self.model.train(input_fn, steps=num_steps)

    def evaluate(self, input_fn):
        return self.model.evaluate(input_fn)

    def predict(self, input_fn):
        return self.model.predict(input_fn)
    def build(self, features, labels, mode):
        # model_fn for tf.estimator.Estimator.
        logits = self.build_layers(features)
        pred_classes = tf.argmax(logits, axis=1)
        pred_probas = tf.nn.softmax(logits)  # class probabilities, if callers want them

        # In prediction mode there are no labels, so return early.
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)

        # Sparse cross-entropy: labels are integer class ids, not one-hot vectors.
        loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=tf.cast(labels, dtype=tf.int32)))
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
        train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step())

        # Evaluate the accuracy of the model.
        acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)

        # Estimators require an EstimatorSpec that specifies the ops
        # for training, evaluation, and prediction.
        estim_specs = tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=pred_classes,
            loss=loss_op,
            train_op=train_op,
            eval_metric_ops={'accuracy': acc_op})
        return estim_specs
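
# A minimal usage sketch, assuming TensorFlow 1.x and synthetic data (the
# shapes and hyperparameters below are illustrative, not from the original
# paste). tf.estimator.inputs.numpy_input_fn is the stock TF 1.x input
# helper, and the feature key "x" matches what build_layers reads.
if __name__ == "__main__":
    num_features, num_classes = 4, 3
    x_data = np.random.rand(120, num_features).astype(np.float32)
    y_data = np.random.randint(num_classes, size=120)

    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": x_data}, y=y_data, batch_size=32,
        num_epochs=None, shuffle=True)
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": x_data}, y=y_data, batch_size=32, shuffle=False)

    model = NNModel(layers=[16, 16], num_classes=num_classes)
    model.train(train_input_fn, num_steps=200)
    print(model.evaluate(eval_input_fn))  # e.g. {'accuracy': ..., 'loss': ...}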