Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
def train(self,data,num_epochs=150,num_labeled=-1,noise_std=0.3,lr=0.02,decay_after=15):
    """Build a (denoising) feed-forward graph, train it, and save the model with simple_save.

    NOTE(review): this snippet is incomplete — it references names that are not
    defined anywhere in view (`architecture`, `L`, `weights`, `z`, `z_pre_l`, `d`,
    `training`, `train_step`, `accuracy`, `y`, `i_iter`, `num_iter`, `n_examples`,
    `batch_size`, `c`, `last_layer_assignment`). Presumably they are built between
    the lines shown here; confirm against the full file.

    Args:
        data: data source exposing next_batch() and a `validate` (images, labels) pair.
        num_epochs: number of passes used to schedule the progress printout.
        num_labeled: unused in the visible code — TODO confirm.
        noise_std: std-dev of the Gaussian noise added to the encoder input
            (0.0 means a clean pass).
        lr: unused in the visible code — presumably the learning rate for the
            optimizer built elsewhere; confirm.
        decay_after: unused in the visible code — TODO confirm.
    """
    tf.reset_default_graph()
    #Create placeholders. They will be assigned when we start the session with feed_dict{...}
    inputs = tf.placeholder(tf.float32, shape=(None, architecture[0]), name='inputs')
    outputs = tf.placeholder(tf.float32, name='outputs')
    #This variable should be assigned in a loop in a function that is being called later in the script
    #We later want to reference that variable after loading the model.
    # Sized (1, units of layer L-1); holds the second-to-last layer's activation
    # so it can be fetched by name after the model is reloaded.
    last_layer_activation = tf.Variable(tf.ones((1,architecture[L-1])),name='last_layer_activation')
    #This is the function where the assignment is made.
    def encoder(inputs,noise_std):
        h = inputs + tf.random_normal(tf.shape(inputs)) * noise_std #Clean input if the noise std is set to zero
        #Loop through all the layers. Doing forward propagation and updating the values we need to keep track of.
        for l in range(1, L+1): #Max. index: L
            # NOTE(review): `z_pre_l` is used before any visible assignment and
            # `m_l`/`v_l` are never consumed here — the per-layer body (matmul /
            # batch-norm producing `z`) appears to be cut out of this paste.
            m_l, v_l = tf.nn.moments(z_pre_l, axes=[0]) #CHANGE
            if l == L:
                #Convert z and apply softmax for the last layer. (TODO: Only for prediction or if we pass through encoder?)
                h = tf.nn.softmax(weights['gamma'][l-1] * (z+weights['beta'][l-1]))
            elif l == L-1: #@@@@@ I want to save this in my model.
                h = tf.nn.relu(z + weights['beta'][l-1])
                # NOTE(review): tf.assign only CREATES the op; its return value is
                # discarded, so nothing in the visible code ever runs it. It is
                # later fetched by name ('last_layer_assignment') after loading.
                tf.assign(last_layer_activation,h,name='last_layer_assignment')
            else:
                h = tf.nn.relu(z + weights['beta'][l-1]) #TODO: No gamma?
        # NOTE(review): `d` has no visible definition — presumably collected
        # per-layer statistics for a ladder-network decoder; confirm.
        return h, d
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    # Sanity check: should print an empty list once everything is initialized.
    print(sess.run(tf.report_uninitialized_variables()))
    epoch_n = 0
    # NOTE(review): `i_iter`/`num_iter` are not defined in view — presumably
    # restored-step and total-step counts computed elsewhere; confirm.
    for i in range(int(i_iter),num_iter):
        #Get the next batch of batch size (images and labels)
        images, labels = data.next_batch()
        sess.run(train_step, feed_dict={inputs: images, outputs: labels, training: True})
        # Print validation accuracy once per epoch-sized chunk of iterations.
        if (i > 1) and ((i+1) % (num_iter//num_epochs) == 0):
            epoch_n = i // (n_examples//batch_size)
            # NOTE(review): training is fed True even for this validation pass —
            # looks unintentional (noise/batch-norm in train mode); confirm.
            print("Epoch: %s, Accuracy: %s" % (epoch_n, sess.run([accuracy], feed_dict={inputs: data.validate[0], outputs: data.validate[1], training: True})))
    #At the end of the loop, I want to specify the model inputs and outputs
    #That I want to save.
    model_inputs = {
        "inputs_placeholder":inputs,
        "outputs_placeholder":outputs
    }
    model_outputs = {
        "accuracy": accuracy,
        "clean_output": y,
        "last_layer_activation": last_layer_activation,
        "last_layer_assignment": last_layer_assignment
    }
    # Export to '<id>_models/' so get_last_layer_activation can reload by name.
    tf.saved_model.simple_save(sess, c + '_models/',model_inputs,model_outputs)
def get_last_layer_activation(self,X,dim):
    """Reload the exported model and collect the saved last-layer activation per row of X.

    For every row the assignment op is run first (which writes the activation
    into the `last_layer_activation` variable), then the variable is read back.

    Args:
        X: 2-D array of samples, one per row.
        dim: width of the activation vector (columns of the result).

    Returns:
        np.ndarray of shape (X.shape[0], dim) with one activation row per sample.
    """
    from tensorflow.python.saved_model import tag_constants

    model_dir = str(self._id) + '_models/'
    activations = np.zeros((X.shape[0],dim))
    # NOTE(review): `graph` is never used, and importing the meta graph here is
    # redundant with loader.load below — kept to preserve behavior exactly.
    graph = tf.Graph()
    saver = tf.train.import_meta_graph(self.get_latest_meta())
    restored_graph = tf.get_default_graph()
    with restored_graph.as_default():
        with tf.Session() as sess:
            tf.saved_model.loader.load(sess, [tag_constants.SERVING], model_dir)
            # Look up everything by the names given at export time.
            inputs_placeholder = restored_graph.get_tensor_by_name('inputs:0')
            outputs_placeholder = restored_graph.get_tensor_by_name('outputs:0')
            training = restored_graph.get_tensor_by_name('training:0')
            last_layer_activation = restored_graph.get_tensor_by_name('last_layer_activation:0')
            last_layer_assignment = restored_graph.get_tensor_by_name('last_layer_assignment:0')
            n_features = X.shape[1]
            for row_idx, sample in enumerate(X):
                feed = {inputs_placeholder: np.reshape(sample,[1,n_features]), training: True}
                # Run the assign op so the variable holds this sample's activation,
                # then read the variable back in a second run.
                sess.run(last_layer_assignment, feed_dict=feed)
                activations[row_idx,:] = sess.run(last_layer_activation, feed_dict=feed)
    return activations
Add Comment
Please sign in to add a comment.