Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import torch
- from matplotlib import pyplot as plt
- import numpy as np
# Build a toy 1-D dataset: y = 7*sin(0.75*x) + 0.5*x + unit-Gaussian noise,
# sampled at NSAMPLE uniform-random x positions in [-10.5, 10.5].
NSAMPLE = 1000
x_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T  # shape (NSAMPLE, 1)
r_data = np.float32(np.random.normal(size=(NSAMPLE,1)))  # noise term, shape (NSAMPLE, 1)
y_data = np.float32(np.sin(0.75*x_data)*7.0+x_data*0.5+r_data*1.0)
# plt.figure(figsize=(8, 8))
# plt.title("Original")
# plot_out = plt.plot(x_data,y_data,'ro',alpha=0.3)
# Swap x and y: the inverted mapping is one-to-many (several y values per x),
# which is exactly the case a mixture density network is meant to handle.
temp_data = x_data
x_data = y_data
y_data = temp_data
from torch import nn
# MDN head: for every mixture component the network emits three numbers —
# a categorical logit, a Gaussian mean, and a Gaussian log-std-dev.
num_mixtures = 128
hidden_dim = 64
output_dim = num_mixtures * 3  # categorical_logit, gaussian_mean, gaussian_stddev
# Two hidden ReLU layers feeding the MDN parameter head.
mdn_layers = [
    nn.Linear(1, hidden_dim),
    nn.ReLU(),
    nn.Linear(hidden_dim, hidden_dim),
    nn.ReLU(),
    nn.Linear(hidden_dim, output_dim),
]
model = torch.nn.Sequential(*mdn_layers)
import torch.distributions as D
from torch.distributions.mixture_same_family import MixtureSameFamily
optimizer = torch.optim.Adam(params=model.parameters())
# Have torch.distributions validate its constructor arguments (e.g. that
# Categorical probs are valid) — useful while debugging an MDN.
torch.distributions.Distribution.set_default_validate_args(True)
def process_categorical_logits(cl):
    """Convert raw logits into mixture coefficients via a stable softmax.

    Args:
        cl: tensor of shape (..., num_mixtures) holding unnormalized
            categorical logits.

    Returns:
        Tensor of the same shape with non-negative entries that sum to 1
        along the last dimension (valid ``probs`` for ``D.Categorical``).
    """
    # get_mixture_coef: https://blog.otoro.net/2015/11/24/mixture-density-networks-with-tensorflow/
    # Subtract the per-row max before exponentiating so exp() cannot overflow.
    out_pi = torch.exp(cl - cl.max(dim=-1, keepdim=True)[0])
    # BUG FIX: the original divided by the row MAX, so the returned weights
    # did not sum to 1 (Categorical silently renormalized them). A proper
    # softmax divides by the row SUM.
    out_pi = out_pi / out_pi.sum(dim=-1, keepdim=True)
    return out_pi
# Fit the MDN by minimizing the negative GMM log-likelihood of y given x.
for step in range(1000):
    net_out = model(torch.as_tensor(x_data))
    # Split the flat parameter head into the three per-component groups.
    categorical_logits, gaussian_means, gaussian_logstddevs = net_out.chunk(3, dim=-1)
    # Debug output: range of the processed mixture weights.
    print(process_categorical_logits(categorical_logits).max())
    print(process_categorical_logits(categorical_logits).min())
    mixture_weights = D.Categorical(probs=process_categorical_logits(categorical_logits))
    components = D.Normal(loc=gaussian_means, scale=torch.exp(gaussian_logstddevs))
    gmm = MixtureSameFamily(mixture_weights, components)
    loss = -torch.mean(gmm.log_prob(torch.as_tensor(y_data.squeeze())))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print(f"Loss {loss}")
# Dense query grid covering (and deliberately exceeding) the training range.
x_test = np.float32(np.arange(-15,15,0.01))
NTEST = x_test.size
x_test = x_test[:, np.newaxis]  # needs to be a matrix, not a vector
def generate_y_data(x_data, model):
    """Draw one y sample per input point from the model's predicted mixture.

    Args:
        x_data: array-like of shape (N, 1) of query points.
        model: network whose output splits into (logits, means, log-stddevs).

    Returns:
        Tensor of N samples, one from each point's Gaussian mixture.
    """
    params = model(torch.as_tensor(x_data))
    logits, means, log_stddevs = params.chunk(3, dim=-1)
    mixture = D.Categorical(logits=logits)
    components = D.Normal(loc=means, scale=torch.exp(log_stddevs))
    return MixtureSameFamily(mixture, components).sample()
# Plot the (inverted) training data in red against samples drawn from the
# fitted MDN on the test grid in blue.
plt.figure(figsize=(8, 8))
plt.title("Inverted")
plot_out = plt.plot(x_data,y_data,'ro', x_test, generate_y_data(x_test, model),'bo',alpha=0.3)
plt.show()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement