Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- from __future__ import absolute_import
- from __future__ import division
- from __future__ import print_function
- import argparse
- import sys
- import numpy as np
- import pandas as pd
- import tensorflow as tf
# SGD step size; forwarded to GradientDescentOptimizer via Estimator params.
LEARNING_RATE = 0.001
def model_fn(features, labels, mode, params):
    """Model function for the regression Estimator.

    Two ReLU hidden layers feeding a linear 6-unit output head.

    Args:
      features: dict with key "x" mapping to a float tensor of the 4 input
        columns (vgs, vbs, vds, current — see the CSV readers in main).
      labels: float tensor with 6 target values per example; None in
        PREDICT mode.
      mode: a tf.estimator.ModeKeys value.
      params: dict containing "learning_rate".

    Returns:
      tf.estimator.EstimatorSpec appropriate for `mode`.
    """
    # Flatten to (batch, 4). The previous hard-coded [11, 11, 31, 4] dropped
    # the batch dimension and only matched one exact batch size (11*11*31
    # rows), so any other batch crashed the reshape; [-1, 4] is batch-size
    # agnostic and matches the 4 feature columns.
    input_layer = tf.reshape(features["x"], [-1, 4])

    first_hidden_layer = tf.layers.dense(input_layer, 4, activation=tf.nn.relu)
    # Second hidden layer, also ReLU.
    second_hidden_layer = tf.layers.dense(
        first_hidden_layer, 5, activation=tf.nn.relu)
    # Linear output head: 6 regression targets, no activation.
    output_layer = tf.layers.dense(second_hidden_layer, 6)
    # Guarantee a (batch, 6) prediction tensor.
    predictions = tf.reshape(output_layer, [-1, 6])

    # PREDICT mode needs only the forward pass.
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions={"ages": predictions})

    # Mean squared error over all 6 targets.
    loss = tf.losses.mean_squared_error(labels, predictions)
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=params["learning_rate"])
    train_op = optimizer.minimize(
        loss=loss, global_step=tf.train.get_global_step())

    # RMSE as an additional eval metric; the cast keeps dtypes aligned with
    # the float64 numpy labels fed by numpy_input_fn.
    eval_metric_ops = {
        "rmse": tf.metrics.root_mean_squared_error(
            tf.cast(labels, tf.float64), predictions)
    }

    # EVAL and TRAIN share the same spec.
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)
def _read_features(csv_file):
    """Load the 4 input columns from csv_file as a float64 numpy array."""
    frame = pd.read_csv(csv_file, usecols=['vgs', 'vbs', 'vds', 'current'])
    return np.asarray(frame, dtype=np.float64)


def _read_labels(csv_file):
    """Load the 6 target columns from csv_file as a float64 numpy array."""
    frame = pd.read_csv(
        csv_file,
        usecols=['plo_tox', 'plo_dxl', 'plo_dxw',
                 'parl1', 'parl2', 'random_fn'])
    return np.asarray(frame, dtype=np.float64)


def main(unused_argv):
    """Train, evaluate, and predict with the regression Estimator.

    Honors the --train_data/--test_data/--predict_data flags parsed in
    __main__ (previously parsed but ignored), falling back to the
    historical hard-coded filenames when a flag is left empty.
    """
    train_file = FLAGS.train_data or "training_data_mc1000.csv"
    test_file = FLAGS.test_data or "test_data_mc1000.csv"
    prediction_file = FLAGS.predict_data or "Tensorflow_prediction_data.csv"

    # Set model params and instantiate the Estimator.
    model_params = {"learning_rate": LEARNING_RATE}
    nn = tf.estimator.Estimator(model_fn=model_fn, params=model_params)

    # Train: epochs unbounded, stop at max_steps.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": _read_features(train_file)},
        y=_read_labels(train_file),
        num_epochs=None,
        shuffle=True)
    nn.train(input_fn=train_input_fn, max_steps=1048576)

    # Evaluate: single deterministic pass over the test set.
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": _read_features(test_file)},
        y=_read_labels(test_file),
        num_epochs=1,
        shuffle=False)
    ev = nn.evaluate(input_fn=test_input_fn)
    print("Loss: %s" % ev["loss"])
    print("Root Mean Squared Error: %s" % ev["rmse"])

    # Predict: print one line per example.
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": _read_features(prediction_file)},
        num_epochs=1,
        shuffle=False)
    for i, p in enumerate(nn.predict(input_fn=predict_input_fn)):
        print("Prediction %s: %s" % (i + 1, p["ages"]))
if __name__ == '__main__':
    # Surface Estimator training progress on stderr.
    tf.logging.set_verbosity(tf.logging.INFO)

    parser = argparse.ArgumentParser()
    parser.register("type", "bool", lambda v: v.lower() == "true")
    # All three flags share type/default; register them table-driven.
    for flag, description in (
            ("--train_data", "Path to the training data."),
            ("--test_data", "Path to the test data."),
            ("--predict_data", "Path to the prediction data.")):
        parser.add_argument(flag, type=str, default="", help=description)
    FLAGS, unparsed = parser.parse_known_args()
    # Forward anything argparse did not consume on to tf.app.run.
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
Add Comment
Please, Sign In to add comment