"""
Implementation of Bayesian optimization algorithms

Authors: Tucker Hartland <hartland1@llnl.gov>
         Nai-Yuan Chiang <chiang7@llnl.gov>
"""

import numpy as np
from numpy.random import uniform
from scipy.optimize import minimize
from ..surrogate_modeling.gp import GaussianProcess
from .acquisition import LCBacquisition
from ..problems.problem import Problem

# A base class defining a general framework for Bayesian optimization
class BOAlgorithmBase:
    def __init__(self):
        self.acquisition_type = "LCB"  # Type of acquisition function (default = "LCB")
        self.xtrain = None  # Training inputs
        self.ytrain = None  # Training observations
        self.n_iter = 20  # Maximum number of optimization steps
        self.n_start = 10  # Number of random restarts: the acquisition function is minimized locally n_start times and the best of the resulting local minima is kept
        self.q = 1  # Batch size
        # Internal members tracking the optimization history
        self.y_hist = None  # History of observations
        self.x_hist = None  # History of sampled points
        self.x_opt = None  # Best observed point
        self.y_opt = None  # Best observed value
        self.idx_opt = None  # Index of the best observed value in the history

    # Sets the acquisition function type and batch size
    def setAcquisitionType(self, acquisition_type, q=1):
        self.acquisition_type = acquisition_type
        self.q = q

    # Sets the training data
    def setTrainingData(self, xtrain, ytrain):
        self.xtrain = xtrain
        self.ytrain = ytrain

    # Method to perform Bayesian optimization
    def optimize(self, fun):
        raise NotImplementedError("Child classes of BOAlgorithmBase must implement the optimize method")

    # Method to return the recorded optimization iterations and objectives
    def getOptimizationHistory(self):
        x_hist = np.array(self.x_hist, copy=True)
        y_hist = np.array(self.y_hist, copy=True)
        return x_hist, y_hist

    # Method to return the optimal solution and objective
    def getOptimalPoint(self):
        x_opt = np.array(self.x_opt, copy=True)
        y_opt = np.array(self.y_opt, copy=True)
        return x_opt, y_opt

# A subclass of BOAlgorithmBase implementing a full Bayesian optimization workflow
class BOAlgorithm(BOAlgorithmBase):
    def __init__(self, gpsurrogate, xtrain, ytrain, acquisition_type="LCB"):
        super().__init__()
        assert isinstance(gpsurrogate, GaussianProcess)
        assert acquisition_type in ["LCB"]
        self.setTrainingData(xtrain, ytrain)
        self.setAcquisitionType(acquisition_type)
        self.gpsurrogate = gpsurrogate
        self.n_iter = 20
        # SLSQP is the default since it supports both bound and general constraints
        self.method = "SLSQP"
        self.bounds = self.gpsurrogate.get_bounds()
        self.constraints = ()
        self.options = {"maxiter": 200}
        self.acqf_minimizer_callback = None

    # Method to set up the callback used to minimize the acquisition function;
    # the callback returns (xopt, yopt, success) from pyminimize below
    def _setup_acqf_minimizer_callback(self):
        self.acqf_minimizer_callback = lambda fun, x0: pyminimize(fun, x0, self.method, self.bounds, self.constraints, self.options)

    # Method to train the GP model
    def _train_surrogate(self, x_train, y_train):
        self.gpsurrogate.train(x_train, y_train)

    # Method to find the best next sampling point by optimizing the acquisition function
    def _find_best_point(self, x_train, y_train, x0=None):
        self._train_surrogate(x_train, y_train)

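        # For reference: the standard lower-confidence-bound rule is
        # LCB(x) = mu(x) - kappa * sigma(x), trading the surrogate mean
        # (exploitation) against predictive uncertainty (exploration);
        # the exact form used here is defined by LCBacquisition.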
        if self.acquisition_type == "LCB":
            acqf = LCBacquisition(self.gpsurrogate)
        else:
            raise NotImplementedError(f"No implemented acquisition_type associated to {self.acquisition_type}")

        # Wrap the acquisition so the minimizer receives a scalar float
        acqf_callback = lambda x: float(np.array(acqf.evaluate(np.atleast_2d(x))).flat[0])

        # Multi-start local minimization of the acquisition function
        x_all = []
        y_all = []
        for ii in range(self.n_start):
            # Draw a fresh random starting point within the bounds, unless an
            # initial guess x0 was provided
            if x0 is None:
                x_start = np.array([uniform(b[0], b[1]) for b in self.bounds])
            else:
                x_start = x0
            xopt, yout, success = self.acqf_minimizer_callback(acqf_callback, x_start)

            if success:
                x_all.append(xopt)
                y_all.append(yout)

        if not x_all:
            raise RuntimeError("Acquisition minimization failed from every starting point")

        # Keep the best of the local minima found across the restarts
        best_xopt = x_all[np.argmin(np.array(y_all))]

        return best_xopt

    # Set the optimization method used to minimize the acquisition function
    def set_method(self, method):
        self.method = method

    # Set the solver options passed to the acquisition minimizer
    def set_options(self, options):
        self.options = options

    # Method to perform Bayesian optimization
    def optimize(self, prob: Problem):
        x_train = self.xtrain
        y_train = self.ytrain

        n_init_sample = np.size(x_train, 0)
        print(f"n_init_sample: {n_init_sample}")
        self._setup_acqf_minimizer_callback()

        self.x_hist = []
        self.y_hist = []

        for i in range(self.n_iter):
            print("*****************************")
            print(f"Iteration {i+1}/{self.n_iter}")

            # Get a new sample point
            x_new = self._find_best_point(x_train, y_train)

            # Evaluate the new sample point
            y_new = prob.evaluate(np.atleast_2d(x_new))

            # Update the training set
            x_train = np.vstack([x_train, x_new])
            y_train = np.vstack([y_train, y_new])

            # Save the new sample point and its observation
            self.x_hist.append(x_new)
            self.y_hist.append(y_new)

            print(f"Sampled point X: {x_new.flatten()}, Observation Y: {y_new.flatten()}")

        # Save the optimal results and all the training data
        self.idx_opt = np.argmin(y_train)
        self.x_opt = x_train[self.idx_opt]
        self.y_opt = y_train[self.idx_opt]
        self.setTrainingData(x_train, y_train)

        print()
        print()
        if self.idx_opt < n_init_sample:
            print(f"Optimal at initial sample: {self.idx_opt+1}")
        else:
            print(f"Optimal at BO iteration: {self.idx_opt-n_init_sample+1}")

        print(f"Optimal point: {self.x_opt.flatten()}, Optimal value: {self.y_opt}")


# Find the minimum of the objective `fun` using scipy.optimize.minimize
def pyminimize(fun, x0, method, bounds, constraints, options):
    res = minimize(fun, x0, method=method, bounds=bounds, constraints=constraints, options=options)
    success = res.success
    if not success:
        print(res.message)
    xopt = res.x
    yopt = res.fun
    return xopt, yopt, success
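

# A minimal usage sketch (illustrative only: the GaussianProcess constructor
# arguments and the Problem subclass below are assumptions about this
# package's interfaces, not a documented API):
#
#   gp = GaussianProcess(...)           # surrogate with bounds over the design space
#   prob = MyProblem()                  # a Problem subclass providing .evaluate()
#   xtrain = np.array([[0.1], [0.5], [0.9]])
#   ytrain = prob.evaluate(xtrain)      # shape (n, 1) observations
#   bo = BOAlgorithm(gp, xtrain, ytrain, acquisition_type="LCB")
#   bo.optimize(prob)
#   x_opt, y_opt = bo.getOptimalPoint()
#   x_hist, y_hist = bo.getOptimizationHistory()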