Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import tensorflow as tf
- import numpy as np
- from sklearn.preprocessing import MinMaxScaler
class Mlp(object):
    """Fully-connected MLP regressor built on the TensorFlow 1.x graph API.

    Builds placeholders ``self.x`` ([None, n_input]) and ``self.y`` ([None, 1]),
    a stack of ReLU (by default) hidden layers of width ``h_size``, a linear
    scalar output layer, an MSE loss ``self.loss`` and an Adam training op
    ``self.train_step``.
    """

    def __init__(self, n_input, h_size, n_layers=1, act=tf.nn.relu, lr=0.01):
        """Construct the graph.

        Args:
            n_input: number of input features.
            h_size: width of each hidden layer.
            n_layers: number of additional hidden-to-hidden layers.
            act: activation applied to every hidden layer (not the output).
            lr: Adam learning rate.
        """
        self.x = tf.placeholder('float', [None, n_input])
        self.y = tf.placeholder('float', [None, 1])

        # BUG FIX: the original call passed (n_input, n_layers, h_size), swapping
        # h_size and n_layers relative to init_variables' signature.
        w, b = self.init_variables(n_input, h_size, n_layers)

        output = self.x
        # Apply every hidden layer (all weight matrices except the last one)
        # with the nonlinearity.
        for layer in np.arange(len(w) - 1):
            output = act(tf.add(tf.matmul(output, w[layer]), b[layer]))
        # BUG FIX: the original loop stopped after n_layers matrices and never
        # used the final [h_size, 1] output weights, so the "prediction" had
        # shape [None, h_size] and broadcast against the [None, 1] target.
        # Apply the output layer linearly (no activation) for regression.
        output = tf.add(tf.matmul(output, w[-1]), b[-1])

        self.loss = tf.reduce_mean((output - self.y) ** 2)
        self.train_step = tf.train.AdamOptimizer(learning_rate=lr).minimize(self.loss)

    @staticmethod
    def init_variables(n_input, h_size, n_layers):
        """Create weights/biases: input layer, n_layers hidden layers, output layer.

        Returns:
            (w, b): lists of tf.Variable with shapes
            [n_input, h_size], n_layers x [h_size, h_size], [h_size, 1]
            and the matching bias vectors.
        """
        w, b = [], []
        w.append(tf.Variable(tf.random_normal([n_input, h_size])))
        b.append(tf.Variable(tf.random_normal([h_size])))
        for _ in np.arange(n_layers):
            w.append(tf.Variable(tf.random_normal([h_size, h_size])))
            b.append(tf.Variable(tf.random_normal([h_size])))
        w.append(tf.Variable(tf.random_normal([h_size, 1])))
        b.append(tf.Variable(tf.random_normal([1])))
        return w, b
# Target function: y = x0 + x1 sampled on an even grid over [-1, 1].
X = np.linspace(start=-1, stop=1, num=2 * 60000)
X = np.reshape(X, (-1, 2))
y = X[:, 0] + X[:, 1]
dataset = np.column_stack((X, y))

# Hyper-parameters.
n_layers = 1
h_size = 200
lr = 0.1
batch_size = 100
eps = 0.05  # stop once the minibatch loss falls below this threshold

mlp = Mlp(n_input=2, n_layers=n_layers, h_size=h_size, act=tf.nn.relu, lr=lr)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Train on random minibatches until the loss is small enough.
step = 0
idx = np.arange(len(dataset))
stop = False
while not stop:
    sample_idx = np.random.choice(idx, batch_size)
    batch = dataset[sample_idx]
    feed_dict = {
        mlp.x: batch[:, :2],
        mlp.y: np.reshape(batch[:, 2], (-1, 1)),
    }
    _, loss = sess.run([mlp.train_step, mlp.loss], feed_dict=feed_dict)
    step += 1
    if step % 100 == 0:
        print(loss)
    if loss < eps:
        stop = True
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement