Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def log_probability(theta, x, y, dy):
    """
    Log-probability function for MCMC sampling of the GP hyperparameters.

    Parameters
    ----------
    theta : array-like
        Free parameters describing the kernel:
        theta[0] -> log_sigma of the Matern-3/2 term,
        theta[1] -> log_rho of the Matern-3/2 term,
        theta[2] -> log_sigma of the jitter term.
    x : array-like
        Sampling locations.
    y : array-like
        Observable values at ``x``.
    dy : array-like
        Uncertainty on the observable.

    Returns
    -------
    float
        The log-likelihood of the model given the data, or ``-np.inf``
        when the priors are violated.
    """
    # Evaluate the priors first and bail out early if theta is excluded,
    # so we never pay for the GP factorization on a rejected sample.
    ln_priors = log_priors(theta)
    if not np.isfinite(ln_priors):
        return -np.inf

    # Build the GP (Matern-3/2 + white-noise jitter) and condition it on
    # the data so we can extract the mean prediction.
    kernel = celerite.terms.Matern32Term(log_sigma=theta[0], log_rho=theta[1])
    kernel += celerite.terms.JitterTerm(log_sigma=theta[2])
    gp = celerite.GP(kernel)
    gp.compute(x, dy)
    # Mean GP prediction at the sampling locations; the covariance is not
    # needed here, only the mean curve to differentiate.
    pred = gp.predict(y, x, return_cov=False)

    # Model = numerical gradient of the GP mean plus a deterministic trend.
    # NOTE(review): the 5 * x**3 trend is hard-coded — confirm it matches
    # the intended deterministic component of the model.
    model = np.gradient(pred, x) + 5. * x**3.0

    # NOTE(review): the standard emcee pattern returns
    # ln_priors + ln_likelihood (the log-posterior). This function returns
    # only the likelihood, which is equivalent for flat priors — confirm
    # that is intentional before using non-flat priors.
    ln_likelihood = log_likelihood(x, y, dy, model)
    return ln_likelihood
Add Comment
Please, Sign In to add comment