import tensorflow as tf

n_inputs = len(X_train.columns) - 2  # Exclude the datetime and station columns
n_hidden1 = params['no_neurons']     # Neurons in hidden layer 1
n_hidden2 = params['no_neurons']     # Neurons in hidden layer 2
n_outputs = 1                        # Single regression output

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# Shape (None, 1) so that y - output subtracts elementwise; with shape=(None)
# the subtraction against a (batch, 1) output would broadcast to (batch, batch).
y = tf.placeholder(tf.float32, shape=(None, 1), name="y")
training = tf.placeholder_with_default(False, shape=(), name='training')

dropout_rate = params['dropout_rate']
X_drop = tf.layers.dropout(X, dropout_rate, training=training)

he_init = tf.initializers.he_normal()

with tf.name_scope("dnn"):
    # Each hidden layer: dense -> batch norm -> ELU -> dropout
    hidden1 = tf.layers.dense(X_drop, n_hidden1, kernel_initializer=he_init, name="hidden1")
    bn1 = tf.layers.batch_normalization(hidden1, training=training)
    bn1_act = tf.nn.elu(bn1)
    bn1_act_drop = tf.layers.dropout(bn1_act, dropout_rate, training=training)

    hidden2 = tf.layers.dense(bn1_act_drop, n_hidden2, kernel_initializer=he_init, name="hidden2")
    bn2 = tf.layers.batch_normalization(hidden2, training=training)
    bn2_act = tf.nn.elu(bn2)
    bn2_act_drop = tf.layers.dropout(bn2_act, dropout_rate, training=training)

    output_before_bn = tf.layers.dense(bn2_act_drop, n_outputs, kernel_initializer=he_init, name="outputs")
    output = tf.layers.batch_normalization(output_before_bn, training=training)

with tf.name_scope("loss"):
    loss = tf.reduce_mean(tf.square(y - output))  # Mean squared error

initial_learning_rate = params['initial_learning_rate']
decay_steps = params['decay_steps']
decay_rate = 0.1

with tf.name_scope("train"):
    global_step = tf.Variable(0, trainable=False, name="global_step")
    learning_rate = tf.train.exponential_decay(initial_learning_rate, global_step, decay_steps, decay_rate)
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=params['momentum'], use_nesterov=True)
    # tf.layers.batch_normalization registers its moving-average updates in
    # UPDATE_OPS; they must be forced to run alongside the training op, or the
    # inference-time statistics never get updated.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        training_op = optimizer.minimize(loss, global_step=global_step)
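
# --- Minimal training-loop sketch for the graph above. Assumptions: y_train
# holds the targets, the "datetime"/"station" column names, and the
# params['n_epochs'] / params['batch_size'] keys are all hypothetical and not
# defined in the paste; adjust to the actual data and params dict.
import numpy as np

init = tf.global_variables_initializer()

X_vals = X_train.drop(columns=["datetime", "station"]).to_numpy(dtype=np.float32)  # hypothetical column names
y_vals = y_train.to_numpy(dtype=np.float32).reshape(-1, 1)  # column vector to match the (None, 1) placeholder

n_epochs = params['n_epochs']      # assumed key
batch_size = params['batch_size']  # assumed key

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        idx = np.random.permutation(len(X_vals))
        for start in range(0, len(X_vals), batch_size):
            batch = idx[start:start + batch_size]
            # training=True enables dropout and batch-norm batch statistics
            sess.run(training_op, feed_dict={X: X_vals[batch],
                                             y: y_vals[batch],
                                             training: True})
        # Evaluate with training left at its default (False): dropout off,
        # batch norm uses its moving averages
        mse = sess.run(loss, feed_dict={X: X_vals, y: y_vals})
        print(epoch, "train MSE:", mse)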