import tensorflow as tf
import numpy as np
import data
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from functools import reduce
class TFDeep:
    def __init__(self, arch, param_delta=0.5, param_lambda=0.0):
        if len(arch) < 2:
            raise ValueError('arch must have at least 2 entries: the input dimension (first) and the number of classes (last)')
        D, C = arch[0], arch[-1]

        # placeholders for the input data and the one-hot labels
        self.X = tf.placeholder(tf.float32, shape=(None, D), name="input_X")
        self.Yoh_ = tf.placeholder(tf.float32, shape=(None, C), name="labels_Y")

        self.l2_loss = 0.0
        self.weight_list = []

        # build the fully connected layers; `activations` is the output of the
        # previous layer (initially the input X), `h` the current pre-activation
        h = activations = self.X
        for i in range(1, len(arch)):
            weights = tf.Variable(tf.random_uniform([arch[i-1], arch[i]]), name="weights_" + str(i))
            self.weight_list.append(weights)
            self.l2_loss = tf.add(tf.nn.l2_loss(weights), self.l2_loss)
            b = tf.Variable(tf.zeros(arch[i]), name="bias_" + str(i))
            h = tf.matmul(activations, weights) + b
            activations = tf.nn.sigmoid(h)

        # the softmax uses the last pre-activation h, so the final sigmoid above is unused
        self.probs = tf.nn.softmax(h)
        self.l2_loss = param_lambda * self.l2_loss
        self.loss = self.l2_loss + tf.losses.log_loss(self.Yoh_, self.probs)
        # tf.print (TF >= 1.13) returns an op; it prints only when run in a session
        self.print_loss = tf.print("loss ", self.loss)

        self.trainer = tf.train.GradientDescentOptimizer(param_delta)
        self.train_step = self.trainer.minimize(self.loss)
        self.session = tf.Session()
    def train(self, X, Yoh_, param_niter):
        # tf.initialize_all_variables() is deprecated; use the global initializer instead
        self.session.run(tf.global_variables_initializer())
        for i in range(param_niter):
            # fetching print_loss together with the update step prints the loss every iteration
            self.session.run([self.train_step, self.print_loss],
                             feed_dict={self.X: X, self.Yoh_: Yoh_})
    def count_params(self):
        parameter_counter = 0
        for parameter in tf.trainable_variables():
            print(parameter.name)
            # number of elements = product of the variable's shape dimensions
            parameter_counter += int(np.prod(parameter.get_shape().as_list()))
        return parameter_counter
    def eval(self, X):
        # predicted class index (argmax of the softmax output) for every sample
        return np.argmax(self.session.run(self.probs, feed_dict={self.X: X}), axis=1)

    def eval_weights(self, X_in=None):
        # the weight variables do not depend on the input, so no feed_dict is needed
        return [self.session.run(w) for w in self.weight_list]
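
# Note (sketch, not part of the original paste): the main block below mentions a ReLU
# variant. The hidden activation in __init__ could be swapped accordingly, e.g.
#     activations = tf.nn.relu(h)     # instead of tf.nn.sigmoid(h)
# with the learning rate (param_delta) lowered to about 0.005, as the comment below notes.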
if __name__ == "__main__":
    # initialize the random number generators
    np.random.seed(100)
    tf.set_random_seed(100)

    reg_factor = 1e-4

    # instantiate the data X and the labels Yoh_
    K, C, N = 6, 3, 10
    X, Y_ = data.sample_gmm(K, C, N)
    # one-hot encode the labels
    Yoh_ = np.zeros((N * K, C))
    Yoh_[np.arange(N * K), Y_] = 1

    # scikit-learn logistic regression baseline
    clf = LogisticRegression(solver='sag', C=1.0 / reg_factor)
    clf.fit(X, Y_)
    Y_pred = clf.predict(X)

    rect = (np.min(X, axis=0), np.max(X, axis=0))
    data.graph_surface(clf.predict, rect, offset=0.5)
    data.graph_data(X, Y_, Y_pred, special=[])
    plt.show()
    # build the graph (pick one of the three configurations below, as in the subtask)
    # if ReLU is used, lower the learning rate to 0.005
    tflr = TFDeep([2, 10, 10, C], 0.1, reg_factor)
    # tflr = TFDeep([2, 10, C], 0.1, reg_factor)
    # tflr = TFDeep([2, C], 0.1, reg_factor)

    # learn the parameters
    tflr.train(X, Yoh_, 10000)

    # print the performance (accuracy, and precision/recall per class)
    acc, prec_rec, _ = data.eval_perf_multi(Y_, tflr.eval(X))
    print("Accuracy = ", acc)
    print("Precision, Recall ", prec_rec)
    print("Number of parameters: ", str(tflr.count_params()))

    # get the predictions on the training set (eval returns argmax class indices, not probabilities)
    Y_pred = tflr.eval(X)

    # plot the results and the decision surface
    rect = (np.min(X, axis=0), np.max(X, axis=0))
    data.graph_surface(tflr.eval, rect, offset=0.5)
    data.graph_data(X, Y_, Y_pred, special=[])
    plt.show()
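
    # Sketch (assumption, not in the original paste): if the actual softmax probabilities
    # are wanted rather than the argmax class indices returned by eval(), they could be
    # fetched directly from the graph built in __init__, e.g.:
    #     probs = tflr.session.run(tflr.probs, feed_dict={tflr.X: X})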