from __future__ import absolute_import
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
from autograd.misc.optimizers import adam
from autograd.scipy.misc import logsumexp
from autograd.scipy.special import expit as sigmoid

import os
import gzip
import struct
import array

import matplotlib.pyplot as plt
import matplotlib.image
from urllib.request import urlretrieve

from data import load_mnist, plot_images, save_images

# Load MNIST and Set Up Data
N_data, train_images, train_labels, test_images, test_labels = load_mnist()
train_images = np.round(train_images[0:10000])
train_labels = train_labels[0:10000]
test_images = np.round(test_images[0:10000])


# Starter Code for 4d
# A correct solution here only requires you to correctly write the neglogprob!
# Because this setup is numerically finicky, the default parameterization
# I've given should give results if neglogprob is correct.
K = 30
D = 784

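# For reference, the model being fit here is a mixture of K Bernoullis over
# the D pixels, with uniform mixing weights (this matches neglogprob below):
#   p(x) = sum_k (1/K) * prod_d theta_kd^x_d * (1 - theta_kd)^(1 - x_d),
# where theta = sigmoid(params).
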
# Random initialization, with a set seed for easier debugging.
# Try changing the scale of the initial randomization; the default is 0.01.
init_params = npr.RandomState(0).randn(K, D) * 0.01

# Batching is implemented for you.
batch_size = 10
num_batches = int(np.ceil(len(train_images) / batch_size))
def batch_indices(iter):
    idx = iter % num_batches
    return slice(idx * batch_size, (idx+1) * batch_size)

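# Example: with batch_size = 10, batch_indices(0) is slice(0, 10), and
# batch_indices(num_batches) wraps back around to slice(0, 10).
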
# This is numerically stable code for the log of a Bernoulli density.
# In particular, notice that we're keeping everything in log space and using logaddexp;
# we never want to take things out of log space, for stability.
def bernoulli_log_density(targets, unnormalized_logprobs):
    # unnormalized_logprobs are in R
    # Targets must be 0 or 1
    t2 = targets * 2 - 1
    # Now t2 is -1 or 1, which makes the following form nice
    label_probabilities = -np.logaddexp(0, -unnormalized_logprobs*t2)
    return np.sum(label_probabilities, axis=-1)   # Sum across pixels.

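# A quick sanity check (my addition, not part of the original starter code):
# for a single pixel, -logaddexp(0, -z*t2) should equal log(sigmoid(z)) when
# the target is 1, and log(1 - sigmoid(z)) when the target is 0.
_z = np.array([[2.5]])
assert np.allclose(bernoulli_log_density(np.array([1.]), _z), np.log(sigmoid(2.5)))
assert np.allclose(bernoulli_log_density(np.array([0.]), _z), np.log(1. - sigmoid(2.5)))
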
def batched_loss(params, iter):
    data_idx = batch_indices(iter)
    return neglogprob(params, train_images[data_idx, :])

def neglogprob(params, data):
    logprob = 0
    logpi = np.log(np.ones(K) / K)  # uniform mixing weights, log(1/K)
    for x in data:
        logbern = bernoulli_log_density(x, params)
        # Accumulate (not overwrite) the mixture log-likelihood of each example.
        logprob = logprob + logsumexp(logpi + logbern)
    logprob /= len(data)
    return -logprob

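# An optional vectorized version of neglogprob (a sketch of the same objective
# under the same uniform-pi assumption, not required by the assignment):
# broadcasting the batch against all K components at once removes the Python loop.
def neglogprob_vectorized(params, data):
    t2 = data * 2 - 1                       # (batch, D), entries in {-1, +1}
    logbern = -np.sum(np.logaddexp(0, -params[None, :, :] * t2[:, None, :]),
                      axis=-1)              # (batch, K)
    logpi = np.log(np.ones(K) / K)
    return -np.mean(logsumexp(logpi + logbern, axis=1))
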
# Get gradient of objective using autograd.
objective_grad = grad(batched_loss)

def print_perf(params, iter, gradient):
    if iter % 30 == 0:
        save_images(sigmoid(params), 'q4plot.png')
        print(batched_loss(params, iter))

# The optimizers provided by autograd can optimize lists, tuples, or dicts of parameters.
# You may use these optimizers for Q4, but implement your own gradient descent optimizer for Q3!
tp = adam(objective_grad, init_params, step_size=0.2, num_iters=10000, callback=print_perf)

theta = sigmoid(tp)  # Bernoulli means of each mixture component, shape (K, D)
N, tx, ty, vx, vy = load_mnist()
tx = np.around(tx)
vx = np.around(vx)
pi = np.ones(K) / K
idx = np.random.choice(N, 20, False)
imgs_top = tx[idx, :392]

theta_top = theta[:, :392]
theta_bottom = theta[:, 392:]
ims = np.zeros((20, 784))
for k, img_top in enumerate(imgs_top):
    # Per-component likelihood of the observed top half:
    # p(x_top | k) = prod_d theta_kd^x_d * (1 - theta_kd)^(1 - x_d)
    power = np.power(theta_top, img_top)
    npower = np.power(1 - theta_top, 1 - img_top)
    prod = np.prod(power * npower, axis=1)
    # Posterior normalizer; note it must include the mixing weights pi.
    den = np.sum(pi * prod)
    img_bottom = np.zeros(392)
    for i in range(392):
        # E[x_bottom_i | x_top] = sum_k p(k | x_top) * theta_bottom[k, i]
        num = np.sum(pi * theta_bottom[:, i] * prod)
        img_bottom[i] = num / den
    ims[k, :392] = img_top
    ims[k, 392:] = img_bottom

save_images(ims, 'mix_bern_sampling.png')
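
# Note (an optional addition, not part of the original starter code): the
# explicit product over 392 pixel probabilities above can underflow in float64
# when the components are confident. A log-space sketch of the same completion,
# normalizing the posterior with logsumexp:
def complete_bottom_logspace(img_top, theta_top, theta_bottom, log_pi):
    # log p(x_top | k), summed over the observed top-half pixels.
    log_lik = np.sum(img_top * np.log(theta_top)
                     + (1 - img_top) * np.log(1 - theta_top), axis=1)
    # log p(k | x_top), normalized stably in log space.
    log_post = log_pi + log_lik - logsumexp(log_pi + log_lik)
    # E[x_bottom | x_top] = sum_k p(k | x_top) * theta_bottom[k, :]
    return np.dot(np.exp(log_post), theta_bottom)
# Usage: ims[k, 392:] = complete_bottom_logspace(img_top, theta_top, theta_bottom, np.log(pi))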