diff --git a/mlos_bench/mlos_bench/optimizers/__init__.py b/mlos_bench/mlos_bench/optimizers/__init__.py index 865c51088c..7e06652a74 100644 --- a/mlos_bench/mlos_bench/optimizers/__init__.py +++ b/mlos_bench/mlos_bench/optimizers/__init__.py @@ -58,6 +58,16 @@ instances, so mlos_bench handles conversions internally (see :py:mod:`mlos_bench.optimizers.convert_configspace`). +Space Adapters +^^^^^^^^^^^^^^ + +When using the :py:class:`.MlosCoreOptimizer`, you can also specify a +``space_adapter_type`` to use for transforming the configuration space into +something that may help the Optimizer find better configurations more quickly +(e.g., by automatically doing space reduction). + +See the :py:mod:`mlos_core.spaces.adapters` module for more information. + Config ++++++ @@ -83,9 +93,14 @@ .. code-block:: json { + // One of the mlos_bench Optimizer classes from this module. "class": "mlos_bench.optimizers.mlos_core_optimizer.MlosCoreOptimizer", + "description": "MlosCoreOptimizer", + + // Optional configuration properties for the selected Optimizer class. "config": { + // Common properties for all Optimizers: "max_suggestions": 1000, "optimization_targets": { // Your optimization target(s) mapped to their respective @@ -96,20 +111,6 @@ "start_with_defaults": true, "seed": 42, - // Optionally override the default space adapter type. - // Must be one of the mlos_core SpaceAdapterType enum values. - // e.g., LlamaTune is a method for automatically doing space reduction - // from the original space. - "space_adapter_type": "LLAMATUNE", - "space_adapter_config": { - // Optional space adapter configuration. - // The JSON schema controls the valid properties here. - // In general check the constructor arguments of the specified - // SpaceAdapterType. - "num_low_dims": 10, - "max_unique_values_per_param": 20, - } - // Now starts a collection of key-value pairs that are specific to // the Optimizer class chosen. @@ -124,6 +125,21 @@ // to the corresponding OptimizerType in the mlos_core module. "n_random_init": 20, "n_random_probability": 0.25, // increased to prioritize exploration + + // In the case of an MlosCoreOptimizer, you can also override the + // default space adapter type. + // Must be one of the mlos_core SpaceAdapterType enum values. + // e.g., LlamaTune is a method for automatically doing space reduction + // from the original space. + "space_adapter_type": "LLAMATUNE", + "space_adapter_config": { + // Optional space adapter configuration. + // The JSON schema controls the valid properties here. + // In general, check the constructor arguments of the specified + // SpaceAdapterType. + "num_low_dims": 10, + "max_unique_values_per_param": 20, + }, } However, it can also be as simple as the following and sane defaults will be @@ -135,6 +151,25 @@ "class": "mlos_bench.optimizers.MlosCoreOptimizer" } +Or, to only override the space adapter type: + +.. code-block:: json + + { + "class": "mlos_bench.optimizers.MlosCoreOptimizer", + "config": { + "space_adapter_type": "LLAMATUNE" + } + } + +Or, to use a different class for suggesting configurations: + +.. code-block:: json + + { + "class": "mlos_bench.optimizers.GridSearchOptimizer" + } + Notes ----- The full set of supported properties is specified in the `JSON schemas for optimizers @@ -152,12 +187,12 @@ Note: All of the examples in this module are expressed in Python for testing purposes. ->>> # Load tunables from a JSON string. ->>> # Note: normally these would be automatically loaded from the Environment(s)'s ->>> # `include_tunables` config parameter. 
->>> # +Load tunables from a JSON string. +Note: normally these would be automatically loaded from the +:py:class:`~mlos_bench.environments.base_environment.Environment`'s +``include_tunables`` config parameter. + >>> import json5 as json ->>> import mlos_core.optimizers >>> from mlos_bench.environments.status import Status >>> from mlos_bench.services.config_persistence import ConfigPersistenceService >>> service = ConfigPersistenceService() @@ -190,25 +225,51 @@ >>> tunables.get_param_values() {'flags': 'auto', 'int_param': 10, 'float_param': 50.0} ->>> # Load a JSON config string for an MlosCoreOptimizer. ->>> # You must specify an mlos_bench Optimizer class in the JSON config. ->>> # (e.g., "mlos_bench.optimizers.mlos_core_optimizer.MlosCoreOptimizer") ->>> # All optimizers support the following config properties at a minimum: +Next we'll load an Optimizer from a JSON string. + +At a minimum, the JSON config must specify the Optimizer ``class`` to use, +which should be one of the classes from this module +(e.g., ``"class": "mlos_bench.optimizers.MlosCoreOptimizer"``). + +>>> # All optimizers support the following optional config properties at a +>>> # minimum: >>> sorted(Optimizer.BASE_SUPPORTED_CONFIG_PROPS) ['max_suggestions', 'optimization_targets', 'seed', 'start_with_defaults'] ->>> # When using the MlosCoreOptimizer, we can also specify some additional ->>> # properties, for instance the optimizer_type, which is one of the mlos_core ->>> # OptimizerType enum values: +When using the :py:class:`.MlosCoreOptimizer`, we can also specify some +additional properties, for instance the ``optimizer_type``, which is one of the +mlos_core :py:data:`~mlos_core.optimizers.OptimizerType` enum values: + +>>> import mlos_core.optimizers >>> print([member.name for member in mlos_core.optimizers.OptimizerType]) ['RANDOM', 'FLAML', 'SMAC'] ->>> # We can also specify an optional space_adapter_type, which can sometimes ->>> # help manipulate the configuration space to something more manageable. +These may also include their own configuration options, which can be specified +as additional key-value pairs in the ``config`` section, where each key-value +pair corresponds to an argument to the respective OptimizerType's constructor. +See :py:meth:`mlos_core.optimizers.OptimizerFactory.create` for more details. + +Other Optimizers may also have their own configuration options. +See each class's documentation for details. + +When using the :py:class:`.MlosCoreOptimizer`, we can also specify an optional +``space_adapter_type``, which can sometimes help manipulate the configuration +space into something more manageable. It should be one of the following +:py:data:`~mlos_core.spaces.adapters.SpaceAdapterType` enum values: + +>>> import mlos_core.spaces.adapters >>> print([member.name for member in mlos_core.spaces.adapters.SpaceAdapterType]) ['IDENTITY', 'LLAMATUNE'] ->>> # Here's an example JSON config for an MlosCoreOptimizer. +These may also include their own configuration options, which can be specified +as additional key-value pairs in the optional ``space_adapter_config`` section, +where each key-value pair corresponds to an argument to the respective +SpaceAdapterType's constructor. See +:py:meth:`mlos_core.spaces.adapters.SpaceAdapterFactory.create` for more details. + +Here's an example JSON config for an :py:class:`.MlosCoreOptimizer`. + >>> optimizer_json_config = ''' ... { ... "class": "mlos_bench.optimizers.mlos_core_optimizer.MlosCoreOptimizer", @@ -232,18 +293,23 @@ ... 
// Must be one of the mlos_core SpaceAdapterType enum values. ... // LlamaTune is a method for automatically doing space reduction ... // from the original space. -... /* +... /* Not enabled for this example: ... "space_adapter_type": "LLAMATUNE", ... "space_adapter_config": { ... // Note: these values are probably too low, ... // but it's just for demonstration. ... "num_low_dims": 2, ... "max_unique_values_per_param": 10, -... } +... }, ... */ ... } ... } ... ''' + +That config will typically be loaded via the ``--optimizer`` command-line +argument to the :py:mod:`mlos_bench` CLI. +However, for demonstration purposes, we can load it directly here: + >>> config = json.loads(optimizer_json_config) >>> optimizer = service.build_optimizer( ... tunables=tunables, @@ -251,6 +317,13 @@ ... config=config, ... ) +Now a :py:mod:`mlos_bench.schedulers` Scheduler can use the selected +:py:class:`.Optimizer` to :py:meth:`.Optimizer.suggest` a new config to test in +a Trial and then :py:meth:`.Optimizer.register` the results. + +A stripped-down example of how this might look in practice is something like +this: + >>> suggested_config_1 = optimizer.suggest() >>> # Default should be suggested first, per json config. >>> suggested_config_1.get_param_values() diff --git a/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py b/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py index 1bb769689c..c1b0b8f513 100644 --- a/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py @@ -17,10 +17,11 @@ Examples -------- ->>> # Load tunables from a JSON string. ->>> # Note: normally these would be automatically loaded from the Environment(s)'s ->>> # `include_tunables` config parameter. ->>> # +Load tunables from a JSON string. +Note: normally these would be automatically loaded from the +:py:class:`~mlos_bench.environments.base_environment.Environment`'s +``include_tunables`` config parameter. + >>> import json5 as json >>> from mlos_bench.environments.status import Status >>> from mlos_bench.services.config_persistence import ConfigPersistenceService @@ -56,7 +57,8 @@ >>> tunables.get_param_values() {'colors': 'green', 'int_param': 2, 'float_param': 0.5} ->>> # Now create a GridSearchOptimizer from a JSON config string. +Now create a :py:class:`.GridSearchOptimizer` from a JSON config string. + >>> optimizer_json_config = ''' ... { ... "class": "mlos_bench.optimizers.grid_search_optimizer.GridSearchOptimizer", @@ -79,6 +81,9 @@ 27 >>> next(grid_search_optimizer.pending_configs) {'colors': 'red', 'float_param': 0, 'int_param': 1} + +Here are some examples of suggesting and registering configurations. + >>> suggested_config_1 = grid_search_optimizer.suggest() >>> # Default should be suggested first, per json config. >>> suggested_config_1.get_param_values() diff --git a/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py b/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py index 4a9b35f8dd..0ef9751228 100644 --- a/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py @@ -7,8 +7,8 @@ Config ------ -The JSON config for an :py:class:`.MlosCoreOptimizer` generally takes the -following basic structure: +:py:mod:`mlos_bench.optimizers` provides an overview of the configuration +options for the :py:class:`.MlosCoreOptimizer`. See Also -------- @@ -21,10 +21,11 @@ Examples -------- ->>> # Load tunables from a JSON string.
->>> # Note: normally these would be automatically loaded from the Environment(s)'s ->>> # `include_tunables` config parameter. ->>> # +Load tunables from a JSON string. +Note: normally these would be automatically loaded from the +:py:class:`~mlos_bench.environments.base_environment.Environment`'s +``include_tunables`` config parameter. + >>> import json5 as json >>> import mlos_core.optimizers >>> from mlos_bench.environments.status import Status @@ -59,19 +60,39 @@ >>> tunables.get_param_values() {'flags': 'auto', 'int_param': 10, 'float_param': 50.0} ->>> # When using the MlosCoreOptimizer, we can also specify some additional ->>> # properties, for instance the optimizer_type, which is one of the mlos_core ->>> # OptimizerType enum values: ->>> print([member.name for member in mlos_core.optimizers.OptimizerType]) -['RANDOM', 'FLAML', 'SMAC'] +When using the :py:class:`.MlosCoreOptimizer`, we can also specify some +additional properties, for instance the ``optimizer_type``, which is one of the +mlos_core :py:data:`~mlos_core.optimizers.OptimizerType` enum values: ->>> # We can also specify ->>> # properties, for instance the optimizer_type, which is one of the mlos_core ->>> # OptimizerType enum values: +>>> import mlos_core.optimizers >>> print([member.name for member in mlos_core.optimizers.OptimizerType]) ['RANDOM', 'FLAML', 'SMAC'] ->>> # Here's an example JSON config for an MlosCoreOptimizer. +These may also include their own configuration options, which can be specified +as additional key-value pairs in the ``config`` section, where each key-value +pair corresponds to an argument to the respective OptimizerType's constructor. +See :py:meth:`mlos_core.optimizers.OptimizerFactory.create` for more details. + +Other Optimizers may also have their own configuration options. +See each class's documentation for details. + +When using the :py:class:`.MlosCoreOptimizer`, we can also specify an optional +``space_adapter_type``, which can sometimes help manipulate the configuration +space into something more manageable. It should be one of the following +:py:data:`~mlos_core.spaces.adapters.SpaceAdapterType` enum values: + +>>> import mlos_core.spaces.adapters +>>> print([member.name for member in mlos_core.spaces.adapters.SpaceAdapterType]) +['IDENTITY', 'LLAMATUNE'] + +These may also include their own configuration options, which can be specified +as additional key-value pairs in the optional ``space_adapter_config`` section, +where each key-value pair corresponds to an argument to the respective +SpaceAdapterType's constructor. See +:py:meth:`mlos_core.spaces.adapters.SpaceAdapterFactory.create` for more details. + +Here's an example JSON config for an :py:class:`.MlosCoreOptimizer`. + >>> optimizer_json_config = ''' ... { ... "class": "mlos_bench.optimizers.mlos_core_optimizer.MlosCoreOptimizer", @@ -105,6 +126,11 @@ ... } ... } ... ''' + +That config will typically be loaded via the ``--optimizer`` command-line +argument to the :py:mod:`mlos_bench` CLI. +However, for demonstration purposes, we can load it directly here: + >>> config = json.loads(optimizer_json_config) >>> optimizer = service.build_optimizer( ... tunables=tunables, @@ -112,6 +138,9 @@ ... config=config, ... ) +Internally, the Scheduler will call the Optimizer's methods to suggest +configurations, like so: + >>> suggested_config_1 = optimizer.suggest() >>> # Normally default values should be suggested first, per json config.
>>> # However, since LlamaTune is being employed here, the first suggestion may @@ -167,7 +196,7 @@ class MlosCoreOptimizer(Optimizer): - """A wrapper class for the mlos_core optimizers.""" + """A wrapper class for the optimizers in :py:mod:`mlos_core.optimizers`.""" def __init__( self, diff --git a/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py b/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py index e57ebbd0a6..18a793e482 100644 --- a/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py @@ -9,10 +9,11 @@ Examples -------- ->>> # Load tunables from a JSON string. ->>> # Note: normally these would be automatically loaded from the Environment(s)'s ->>> # `include_tunables` config parameter. ->>> # +Load tunables from a JSON string. +Note: normally these would be automatically loaded from the +:py:class:`~mlos_bench.environments.base_environment.Environment`'s +``include_tunables`` config parameter. + >>> import json5 as json >>> from mlos_bench.environments.status import Status >>> from mlos_bench.services.config_persistence import ConfigPersistenceService @@ -48,10 +49,10 @@ >>> tunables.get_param_values() {'colors': 'green', 'int_param': 2, 'float_param': 0.5} ->>> # Load a JSON config of some tunable values to explicitly test. ->>> # Normally these would be provided by the ->>> # `mlos_bench --tunable-values` ->>> # CLI option. +Load a JSON config of some tunable values to explicitly test. +Normally these would be provided by the :py:mod:`mlos_bench.run` CLI's +``--tunable-values`` option. + >>> tunable_values_json = ''' ... { ... "colors": "red", @@ -64,7 +65,8 @@ {'colors': 'red', 'int_param': 1, 'float_param': 0.0} >>> assert not tunables.is_defaults() ->>> # Now create a OneShotOptimizer from a JSON config string. +Now create a :py:class:`.OneShotOptimizer` from a JSON config string. + >>> optimizer_json_config = ''' ... { ... "class": "mlos_bench.optimizers.one_shot_optimizer.OneShotOptimizer", @@ -76,7 +78,9 @@ ... service=service, ... config=config, ... ) ->>> # Run the optimizer. + +Run the optimizer. + >>> # Note that it will only run for a single iteration and return the values we set. >>> while optimizer.not_converged(): ... suggestion = optimizer.suggest()
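Putting the pieces from these examples together, here is a minimal sketch of the suggest/run/register loop that a :py:mod:`mlos_bench.schedulers` Scheduler drives internally. It reuses the ``optimizer`` and ``Status`` objects from the doctests above; the ``"score"`` metric name and the fabricated result value are hypothetical stand-ins for the configured ``optimization_targets`` and for what running a real Trial would measure:

>>> while optimizer.not_converged():
...     suggestion = optimizer.suggest()
...     # A real Scheduler would execute a Trial with the suggested config
...     # and collect its measured results; we fabricate a result here
...     # purely for illustration.
...     fake_results = {"score": 42.0}  # hypothetical metric name and value
...     _ = optimizer.register(suggestion, Status.SUCCEEDED, fake_results)

Registering results lets Optimizers that model the configuration space (e.g., :py:class:`.MlosCoreOptimizer`) take them into account on the next :py:meth:`.Optimizer.suggest` call, while simpler Optimizers like :py:class:`.GridSearchOptimizer` and :py:class:`.OneShotOptimizer` just iterate over their predetermined configs.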