SHARE
TWEET

Untitled

a guest Mar 20th, 2017 61 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
# Hyperparameters for the heteroscedastic mixture density network (MDN).
NHIDDEN = 50          # hidden units in the single tanh layer
STDEV   = 0.1         # stddev for random-normal weight initialization
KMIX    = 20 # NUMBER OF MIXTURES
NOUT    = KMIX * 3 # PI / MU / STD  (three parameters per mixture component)

# Graph inputs (TF1 placeholders): scalar input and scalar target per example,
# batch dimension left unspecified.
x = tf.placeholder(dtype=tf.float32, shape=[None,1], name="x")
y = tf.placeholder(dtype=tf.float32, shape=[None,1], name="y")

# Weights: one shared hidden layer ("l1"), then two output heads —
# "l2_mds" produces the KMIX*3 mixture parameters, "l2_err" produces the
# single input-dependent (heteroscedastic) noise sigma.
Wmdn = {
    "l1": tf.Variable(tf.random_normal([1,NHIDDEN], stddev=STDEV, dtype=tf.float32)),
    "l2_mds": tf.Variable(tf.random_normal([NHIDDEN,NOUT], stddev=STDEV, dtype=tf.float32)),
    "l2_err": tf.Variable(tf.random_normal([NHIDDEN,1], stddev=STDEV, dtype=tf.float32))
}
# Biases. NOTE(review): the mixture-head bias is drawn uniformly in [-10, 10],
# presumably to spread the initial component means across the output range —
# confirm that this matches the data scale before reusing.
bmdn = {
    "l1": tf.Variable(tf.zeros([1,NHIDDEN], dtype=tf.float32)),
    "l2_mds": tf.Variable(tf.random_uniform([1,NOUT], minval=-10, maxval=10, dtype=tf.float32)),
    "l2_err": tf.Variable(tf.zeros([1,1], dtype=tf.float32))
}
  19.  
  20. def hmdn(_x, _W, _b):
  21.     sig_gain = 1
  22.     _l1  = tf.nn.tanh(tf.matmul(_x, _W['l1']) + _b['l1'])
  23.     _out_mds = tf.matmul(_l1, _W['l2_mds']) + _b['l2_mds']
  24.     _out_err_sigma_hat = tf.matmul(_l1, _W['l2_err']) + _b['l2_err']
  25.     # CONVERT OUTPUT (PI / MU / SIGMA)
  26.     _out_pi_hat, _out_sigma_hat, _out_mu = tf.split(_out_mds, 3, 1)
  27.     _out_pi_hat = tf.exp(_out_pi_hat - tf.reduce_max(_out_pi_hat, 1, keep_dims=True))
  28.     _nor_pi = tf.reciprocal(tf.reduce_sum(_out_pi_hat, 1, keep_dims=True))
  29.     _out_pi = tf.multiply(_nor_pi, _out_pi_hat)
  30.     _out_sigma = sig_gain*tf.sigmoid(_out_sigma_hat)
  31.     # CONVERT NOISE MODEL (SIGMA)
  32.     _out_err_sigma = sig_gain*tf.sigmoid(_out_err_sigma_hat)
  33.     return _out_pi, _out_sigma, _out_mu, _out_err_sigma
  34. out_pi, out_sigma, out_mu, out_err_sigma = hmdn(x, Wmdn, bmdn)
  35. print ("HETEROSCEDASTIC MIXTURE DENSITY NETOWRK READY")
  36.  
  37. pi = math.pi
  38. # UNIVARIATE GAUSSIAN MODEL
  39. def tf_normal(_y, _mu, _sigma):
  40.     _result = (_y-_mu)/_sigma
  41.     _result = -tf.square(_result)/2
  42.     _result = tf.exp(_result)/(math.sqrt(2*pi)*_sigma)    
  43.     return _result
  44.  
  45. # HETEROSCEDASTIC GAUSSIAN MIXTURE MODEL
  46. def hgmm(_y, _out_pi, _out_sigma, _out_mu, _out_err_sig):
  47.     _probs  = tf_normal(_y, _out_mu, _out_sigma+_out_err_sig)
  48.     _result = tf.multiply(_out_pi, _probs)
  49.     _result = tf.reduce_sum(_result, 1, keep_dims=True)
  50.     return tf.reduce_mean(-tf.log(_result))
  51.  
# SET LOSS AND OPTIMIZER
loss = hgmm(y, out_pi, out_sigma, out_mu, out_err_sigma)
# NOTE(review): epsilon=0.1 is far above Adam's usual 1e-8 default —
# presumably chosen deliberately for stability of the NLL loss; confirm
# before reusing elsewhere.
optm = tf.train.AdamOptimizer(learning_rate=0.001
            , beta1=0.9, beta2=0.999, epsilon=0.1).minimize(loss)
RAW Paste Data
Top