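"""Bayesian optimization (EGO-style) of the Branin function using MOE.

A minimal end-to-end loop: fit GP hyperparameters by maximizing the log
marginal likelihood with multistarted Newton, then sequentially sample the
point that maximizes expected improvement (EI). Assumes MOE is installed
with its compiled C++ bindings (the cpp_wrappers modules) under Python 2.7.
"""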
import numpy

from moe.optimal_learning.python.cpp_wrappers.covariance import SquareExponential as cppSquareExponential
from moe.optimal_learning.python.cpp_wrappers.domain import TensorProductDomain as cppTensorProductDomain
from moe.optimal_learning.python.cpp_wrappers.expected_improvement import ExpectedImprovement as cppExpectedImprovement
from moe.optimal_learning.python.cpp_wrappers.expected_improvement import multistart_expected_improvement_optimization
from moe.optimal_learning.python.cpp_wrappers.gaussian_process import GaussianProcess as cppGaussianProcess
from moe.optimal_learning.python.cpp_wrappers.log_likelihood import GaussianProcessLogLikelihood as cppGaussianProcessLogLikelihood
from moe.optimal_learning.python.cpp_wrappers.log_likelihood import multistart_hyperparameter_optimization
from moe.optimal_learning.python.cpp_wrappers.optimization import GradientDescentOptimizer as cppGradientDescentOptimizer
from moe.optimal_learning.python.cpp_wrappers.optimization import GradientDescentParameters as cppGradientDescentParameters
from moe.optimal_learning.python.cpp_wrappers.optimization import NewtonOptimizer as cppNewtonOptimizer
from moe.optimal_learning.python.cpp_wrappers.optimization import NewtonParameters as cppNewtonParameters
from moe.optimal_learning.python.data_containers import HistoricalData
from moe.optimal_learning.python.geometry_utils import ClosedInterval
from moe.optimal_learning.python.python_version.covariance import SquareExponential as pythonSquareExponential
from moe.optimal_learning.python.python_version.domain import TensorProductDomain as pythonTensorProductDomain
from moe.optimal_learning.python.python_version.gaussian_process import GaussianProcess as pythonGaussianProcess
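# Gradient-descent settings for the EI maximization below. In MOE's
# GradientDescentParameters, num_multistarts controls the number of random
# restarts, gamma and pre_mult shape the decaying step size, and
# tolerance/max_relative_change bound step acceptance (see MOE's
# optimization docs for the exact semantics).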
sgd_params = cppGradientDescentParameters(num_multistarts=100, max_num_steps=50, max_num_restarts=2,
                                          num_steps_averaged=15, gamma=0.7, pre_mult=1.0,
                                          max_relative_change=0.7, tolerance=1.0e-3)
class Branin(object):
    """Branin test function plus the settings used for this experiment."""

    def __init__(self):
        self._dim = 2
        # Note: the standard Branin domain is x_1 in [-5, 10], x_2 in [0, 15];
        # here the search runs on the larger square [-15, 15]^2.
        self._search_domain = numpy.repeat([[-15., 15.]], 2, axis=0)
        self._hyper_domain = numpy.array([[10., 100.], [0.1, 15.], [0.1, 15.]])
        self._num_init_pts = 15
        self._sample_var = 0.01

    def evaluate(self, x):
        r"""Evaluate the Branin function at a 2-dim point.

        Usually evaluated on the square x_1 \in [-5, 10], x_2 \in [0, 15].
        The global minimum f(x*) = 0.397887 is attained at x* = [-pi, 12.275],
        [pi, 2.275], and [9.42478, 2.475].

        :param x: 2-dim numpy array
        """
        a = 1
        b = 5.1 / (4 * pow(numpy.pi, 2.0))
        c = 5 / numpy.pi
        r = 6
        s = 10
        t = 1 / (8 * numpy.pi)
        return a * pow(x[1] - b * pow(x[0], 2.0) + c * x[0] - r, 2.0) + s * (1 - t) * numpy.cos(x[0]) + s
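# Quick sanity check (illustrative; run in a REPL rather than in this script):
# at the known minimizer [pi, 2.275] the quadratic term vanishes, leaving
# s * t = 10 / (8 * pi) ~= 0.397887.
#
#   >>> round(Branin().evaluate(numpy.array([numpy.pi, 2.275])), 6)
#   0.397887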
def hyper_opt_newton(cpp_cov, data, hyper_domain):
    """Maximize the GP log marginal likelihood over hyper_domain with multistarted Newton."""
    cpp_search_domain = cppTensorProductDomain([ClosedInterval(bound[0], bound[1]) for bound in hyper_domain])
    newton_opt_param = cppNewtonParameters(num_multistarts=100, max_num_steps=100, gamma=1.01,
                                           time_factor=1.0e-3, max_relative_change=1.0, tolerance=1.0e-10)
    cpp_gp_loglikelihood = cppGaussianProcessLogLikelihood(cpp_cov, data)
    newton_optimizer = cppNewtonOptimizer(cpp_search_domain, cpp_gp_loglikelihood, newton_opt_param)
    # The num_multistarts positional argument (-1 here) is unused by MOE's C++
    # wrapper; the actual multistart count comes from newton_opt_param.
    return multistart_hyperparameter_optimization(newton_optimizer, -1, max_num_threads=16)
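# Main loop: EGO-style sequential optimization. Each iteration (i) refits the
# GP hyperparameters on all data gathered so far, (ii) maximizes expected
# improvement over the search domain to choose the next sample, and (iii)
# evaluates the objective there and appends the result to the history.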
if __name__ == "__main__":
    objective_func = Branin()
    python_search_domain = pythonTensorProductDomain([ClosedInterval(bound[0], bound[1]) for bound in objective_func._search_domain])
    cpp_search_domain = cppTensorProductDomain([ClosedInterval(bound[0], bound[1]) for bound in objective_func._search_domain])

    # Seed the model with uniformly random initial points.
    initial_points = python_search_domain.generate_uniform_random_points_in_domain(objective_func._num_init_pts)
    hist_data = HistoricalData(objective_func._dim)
    hist_data.append_historical_data(initial_points, [objective_func.evaluate(pt) for pt in initial_points],
                                     objective_func._sample_var * numpy.ones(len(initial_points)))

    # Square-exponential kernel: one signal variance plus one length scale per dimension.
    python_cov = pythonSquareExponential(numpy.ones(objective_func._dim + 1))
    cpp_cov = cppSquareExponential(numpy.ones(objective_func._dim + 1))

    for i in range(50):
        # Refit hyperparameters on all data collected so far.
        hyper = hyper_opt_newton(cpp_cov, hist_data, objective_func._hyper_domain)
        python_cov.set_hyperparameters(hyper)
        cpp_cov.set_hyperparameters(hyper)
        python_gp = pythonGaussianProcess(python_cov, hist_data)  # built for parity with the C++ GP; unused below
        cpp_gp = cppGaussianProcess(cpp_cov, hist_data)

        # Pick the next sample by multistarted gradient ascent on EI.
        cpp_ei_evaluator = cppExpectedImprovement(gaussian_process=cpp_gp, num_mc_iterations=int(1e6))
        optimizer = cppGradientDescentOptimizer(cpp_search_domain, cpp_ei_evaluator, sgd_params, 1000)
        next_points = multistart_expected_improvement_optimization(optimizer, None, 1, use_gpu=False, which_gpu=0, max_num_threads=1)

        # Report progress, then evaluate the objective at the chosen point.
        cpp_ei_evaluator.set_current_point(next_points)
        print("itr {0}, best val: {1}, voi: {2}".format(i, numpy.amin(hist_data.points_sampled_value), cpp_ei_evaluator.compute_expected_improvement()))
        hist_data.append_historical_data(next_points, [objective_func.evaluate(next_points[0])], [objective_func._sample_var])
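# Running this script requires MOE built with its C++ extension (e.g. from the
# yelp/MOE repository or its Docker image) under Python 2.7. Each iteration
# prints the best objective value found so far and the EI ("voi") at the
# chosen point; the best value should approach the Branin optimum of
# ~0.397887 as the loop progresses.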