From 6ed399cb254584763fe802c902d029dcab458f5e Mon Sep 17 00:00:00 2001
From: fmfn
Date: Tue, 12 Feb 2019 12:09:00 -0500
Subject: [PATCH] Increase test coverage

This commit adds tests for a number of previously untested functions and
methods. Most notably, it adds tests for the maximize method of the main
BO object.

- Add tests for observer subscription and gp params
- Add missing tests for util file
- Test base tracker observer
---
 bayes_opt/util.py                   |  24 -----
 tests/test_bayesian_optimization.py | 146 ++++++++++++++++++++++++++++
 tests/test_observer.py              |  60 +++++++++++
 tests/test_util.py                  |  39 +++++++-
 4 files changed, 244 insertions(+), 25 deletions(-)

diff --git a/bayes_opt/util.py b/bayes_opt/util.py
index 67fba9d9b..44edbcbed 100644
--- a/bayes_opt/util.py
+++ b/bayes_opt/util.py
@@ -238,27 +238,3 @@ def underline(cls, s):
     def yellow(cls, s):
         """Wrap text in yellow."""
         return cls._wrap_colour(s, cls.YELLOW)
-
-
-# def unique_rows(a):
-#     """
-#     A function to trim repeated rows that may appear when optimizing.
-#     This is necessary to avoid the sklearn GP object from breaking
-
-#     :param a: array to trim repeated rows from
-
-#     :return: mask of unique rows
-#     """
-#     if a.size == 0:
-#         return np.empty((0,))
-
-#     # Sort array and kep track of where things should go back to
-#     order = np.lexsort(a.T)
-#     reorder = np.argsort(order)
-
-#     a = a[order]
-#     diff = np.diff(a, axis=0)
-#     ui = np.ones(len(a), 'bool')
-#     ui[1:] = (diff != 0).any(axis=1)
-
-#     return ui[reorder]
diff --git a/tests/test_bayesian_optimization.py b/tests/test_bayesian_optimization.py
index a2c31e753..205362d3b 100644
--- a/tests/test_bayesian_optimization.py
+++ b/tests/test_bayesian_optimization.py
@@ -2,6 +2,8 @@
 import numpy as np
 from bayes_opt import UtilityFunction
 from bayes_opt import BayesianOptimization
+from bayes_opt.observer import ScreenLogger
+from bayes_opt.event import Events, DEFAULT_EVENTS
 
 
 def target_func(**kwargs):
@@ -139,6 +141,150 @@ def test_prime_queue_with_register_and_init():
     assert len(optimizer.space) == 1
 
 
+def test_prime_subscriptions():
+    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer._prime_subscriptions()
+
+    # Test that the default observer is correctly subscribed
+    for event in DEFAULT_EVENTS:
+        assert all([
+            isinstance(k, ScreenLogger) for k in
+            optimizer._events[event].keys()
+        ])
+        assert all([
+            hasattr(k, "update") for k in
+            optimizer._events[event].keys()
+        ])
+
+    test_subscriber = "test_subscriber"
+    def test_callback(event, instance):
+        pass
+
+    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer.subscribe(
+        event=Events.OPTMIZATION_START,
+        subscriber=test_subscriber,
+        callback=test_callback,
+    )
+    # Test that the desired observer is subscribed
+    assert all([
+        k == test_subscriber for k in
+        optimizer._events[Events.OPTMIZATION_START].keys()
+    ])
+    assert all([
+        v == test_callback for v in
+        optimizer._events[Events.OPTMIZATION_START].values()
+    ])
+
+    # Check that priming subscriptions won't overwrite manual subscriptions
+    optimizer._prime_subscriptions()
+    assert all([
+        k == test_subscriber for k in
+        optimizer._events[Events.OPTMIZATION_START].keys()
+    ])
+    assert all([
+        v == test_callback for v in
+        optimizer._events[Events.OPTMIZATION_START].values()
+    ])
+
+    assert optimizer._events[Events.OPTMIZATION_STEP] == {}
+    assert optimizer._events[Events.OPTMIZATION_END] == {}
+
+    with pytest.raises(KeyError):
+        optimizer._events["other"]
+
+
+def test_set_bounds():
+    pbounds = {
+        'p1': (0, 1),
+        'p3': (0, 3),
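+        # keys are deliberately listed out of order here; the asserts
+        # below expect the space to sort bounds by parameter name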
+        'p2': (0, 2),
+        'p4': (0, 4),
+    }
+    optimizer = BayesianOptimization(target_func, pbounds, random_state=1)
+
+    # Ignore unknown keys
+    optimizer.set_bounds({"other": (7, 8)})
+    assert all(optimizer.space.bounds[:, 0] == np.array([0, 0, 0, 0]))
+    assert all(optimizer.space.bounds[:, 1] == np.array([1, 2, 3, 4]))
+
+    # Update bounds accordingly
+    optimizer.set_bounds({"p2": (1, 8)})
+    assert all(optimizer.space.bounds[:, 0] == np.array([0, 1, 0, 0]))
+    assert all(optimizer.space.bounds[:, 1] == np.array([1, 8, 3, 4]))
+
+
+def test_set_gp_params():
+    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    assert optimizer._gp.alpha == 1e-6
+    assert optimizer._gp.n_restarts_optimizer == 25
+
+    optimizer.set_gp_params(alpha=1e-2)
+    assert optimizer._gp.alpha == 1e-2
+    assert optimizer._gp.n_restarts_optimizer == 25
+
+    optimizer.set_gp_params(n_restarts_optimizer=7)
+    assert optimizer._gp.alpha == 1e-2
+    assert optimizer._gp.n_restarts_optimizer == 7
+
+
+def test_maximize():
+    from sklearn.exceptions import NotFittedError
+    class Tracker:
+        def __init__(self):
+            self.start_count = 0
+            self.step_count = 0
+            self.end_count = 0
+
+        def update_start(self, event, instance):
+            self.start_count += 1
+
+        def update_step(self, event, instance):
+            self.step_count += 1
+
+        def update_end(self, event, instance):
+            self.end_count += 1
+
+        def reset(self):
+            self.__init__()
+
+    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+
+    tracker = Tracker()
+    optimizer.subscribe(
+        event=Events.OPTMIZATION_START,
+        subscriber=tracker,
+        callback=tracker.update_start,
+    )
+    optimizer.subscribe(
+        event=Events.OPTMIZATION_STEP,
+        subscriber=tracker,
+        callback=tracker.update_step,
+    )
+    optimizer.subscribe(
+        event=Events.OPTMIZATION_END,
+        subscriber=tracker,
+        callback=tracker.update_end,
+    )
+
+    optimizer.maximize(init_points=0, n_iter=0)
+    assert optimizer._queue.empty
+    assert len(optimizer.space) == 1
+    assert tracker.start_count == 1
+    assert tracker.step_count == 1
+    assert tracker.end_count == 1
+
+    optimizer.maximize(init_points=2, n_iter=0, alpha=1e-2)
+    assert optimizer._queue.empty
+    assert len(optimizer.space) == 3
+    assert optimizer._gp.alpha == 1e-2
+    assert tracker.start_count == 2
+    assert tracker.step_count == 3
+    assert tracker.end_count == 2
+
+
 if __name__ == '__main__':
     r"""
     CommandLine:
diff --git a/tests/test_observer.py b/tests/test_observer.py
index 0575a2ba9..e6d2d92a7 100644
--- a/tests/test_observer.py
+++ b/tests/test_observer.py
@@ -1,4 +1,6 @@
 from bayes_opt.bayesian_optimization import Observable
+from bayes_opt.observer import ScreenLogger, _Tracker, _get_default_logger
+from bayes_opt.event import Events
 
 
 EVENTS = ["a", "b", "c"]
@@ -63,6 +65,64 @@ def test_dispatch():
     assert observer_a.counter == 2
 
 
+def test_tracker():
+    class MockInstance:
+        def __init__(self, max_target=1, max_params=[1, 1]):
+            self._max_target = max_target
+            self._max_params = max_params
+
+        @property
+        def max(self):
+            return {"target": self._max_target, "params": self._max_params}
+
+    tracker = _Tracker()
+    assert tracker._iterations == 0
+    assert tracker._previous_max is None
+    assert tracker._previous_max_params is None
+
+    test_instance = MockInstance()
+    tracker._update_tracker("other_event", test_instance)
+    assert tracker._iterations == 0
+    assert tracker._previous_max is None
+    assert tracker._previous_max_params is None
+
+    tracker._update_tracker(Events.OPTMIZATION_STEP, test_instance)
+    assert tracker._iterations == 1
+    assert tracker._previous_max == 1
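+    # the tracker records the best instance.max seen across optimization
+    # step events, as the later assertions verify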
+    assert tracker._previous_max_params == [1, 1]
+
+    new_instance = MockInstance(max_target=7, max_params=[7, 7])
+    tracker._update_tracker(Events.OPTMIZATION_STEP, new_instance)
+    assert tracker._iterations == 2
+    assert tracker._previous_max == 7
+    assert tracker._previous_max_params == [7, 7]
+
+    other_instance = MockInstance(max_target=2, max_params=[2, 2])
+    tracker._update_tracker(Events.OPTMIZATION_STEP, other_instance)
+    assert tracker._iterations == 3
+    assert tracker._previous_max == 7
+    assert tracker._previous_max_params == [7, 7]
+
+    tracker._time_metrics()
+    start_time = tracker._start_time
+    previous_time = tracker._previous_time
+
+    tracker._time_metrics()
+    assert start_time == tracker._start_time
+    assert previous_time < tracker._previous_time
+
+
+def test_get_default_logger():
+    logger = _get_default_logger(verbose=1)
+    assert isinstance(logger, ScreenLogger)
+    assert logger._verbose == 1
+
+    logger = _get_default_logger(verbose=2)
+    assert logger._verbose == 2
+
+
 if __name__ == '__main__':
     r"""
     CommandLine:
diff --git a/tests/test_util.py b/tests/test_util.py
index 04ecf5deb..4430b93da 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -1,7 +1,9 @@
+import pytest
 import numpy as np
 
 from bayes_opt import BayesianOptimization
-from bayes_opt.util import UtilityFunction, acq_max, load_logs, ensure_rng
+from bayes_opt.util import UtilityFunction, Colours
+from bayes_opt.util import acq_max, load_logs, ensure_rng
 
 from sklearn.gaussian_process.kernels import Matern
 from sklearn.gaussian_process import GaussianProcessRegressor
@@ -51,6 +53,20 @@ def brute_force_maximum(MESH, GP, kind='ucb', kappa=1.0, xi=1.0):
 X, Y, GP, MESH = GLOB['x'], GLOB['y'], GLOB['gp'], GLOB['mesh']
 
 
+def test_utility_function():
+    util = UtilityFunction(kind="ucb", kappa=1.0, xi=1.0)
+    assert util.kind == "ucb"
+
+    util = UtilityFunction(kind="ei", kappa=1.0, xi=1.0)
+    assert util.kind == "ei"
+
+    util = UtilityFunction(kind="poi", kappa=1.0, xi=1.0)
+    assert util.kind == "poi"
+
+    with pytest.raises(NotImplementedError):
+        util = UtilityFunction(kind="other", kappa=1.0, xi=1.0)
+
+
 def test_acq_with_ucb():
     util = UtilityFunction(kind="ucb", kappa=1.0, xi=1.0)
     episilon = 1e-2
@@ -130,6 +146,27 @@ def f(x, y):
     load_logs(other_optimizer, ["./tests/test_logs.json"])
 
 
+def test_colours():
+    colour_wrappers = [
+        (Colours.BLUE, Colours.blue),
+        (Colours.BOLD, Colours.bold),
+        (Colours.CYAN, Colours.cyan),
+        (Colours.DARKCYAN, Colours.darkcyan),
+        (Colours.GREEN, Colours.green),
+        (Colours.PURPLE, Colours.purple),
+        (Colours.RED, Colours.red),
+        (Colours.UNDERLINE, Colours.underline),
+        (Colours.YELLOW, Colours.yellow),
+    ]
+
+    for colour, wrapper in colour_wrappers:
+        text1 = Colours._wrap_colour("test", colour)
+        text2 = wrapper("test")
+
+        assert text1.split("test") == [colour, Colours.END]
+        assert text2.split("test") == [colour, Colours.END]
+
+
 if __name__ == '__main__':
     r"""
     CommandLine:
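
--
A minimal sketch of the observer API these tests exercise, assuming the
1.x names used throughout this patch (including the historical
"OPTMIZATION" spelling in the Events constants); the target function,
bounds, and subscriber name below are illustrative, not part of the
change:

    from bayes_opt import BayesianOptimization
    from bayes_opt.event import Events

    def black_box(x, y):
        # toy objective with a known maximum of 1 at (0, 1)
        return -x ** 2 - (y - 1) ** 2 + 1

    def log_step(event, instance):
        # callbacks receive the event name and the dispatching optimizer
        print(event, instance.max)

    optimizer = BayesianOptimization(
        f=black_box,
        pbounds={"x": (-2, 2), "y": (-3, 3)},
        random_state=1,
    )
    # same subscribe(event, subscriber, callback) signature the tests use
    optimizer.subscribe(
        event=Events.OPTMIZATION_STEP,
        subscriber="step_logger",
        callback=log_step,
    )
    optimizer.maximize(init_points=2, n_iter=3)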