Select: problem-specific minimize method for SaCeSS #1339

Merged 32 commits on Nov 12, 2024
Changes from 31 commits
Commits (32)
c69c80d  let users supply calibration results (dilpath, Feb 1, 2024)
456a30a  Merge branch 'develop' into select_use_old_calibrations (dilpath, Mar 26, 2024)
5f62604  minimize method maker for sacess (Mar 26, 2024)
d51432a  doc (Mar 26, 2024)
bdb9ba6  user-supplied constructor args (Mar 26, 2024)
98a3da0  doc how to specify e.g. `max_walltime_s` (Mar 26, 2024)
927a28b  allow saving of sacess histories (Mar 26, 2024)
37f3d8d  Merge branch 'develop' into select_use_old_calibrations (Doresic, Mar 27, 2024)
1c66dc0  Merge remote-tracking branch 'origin/select_use_old_calibrations' int… (Doresic, Mar 27, 2024)
5683f9d  handle calibrated model via petab select (dilpath, Mar 27, 2024)
af86b25  Merge branch 'select_use_old_calibrations' into select_sacess_minimiz… (dilpath, Mar 27, 2024)
7fe5782  custom petab select branch (dilpath, Mar 27, 2024)
bfa2021  Merge branch 'select_use_old_calibrations' into select_sacess_minimiz… (dilpath, Mar 27, 2024)
3b51af9  functionality moved to petab-select (dilpath, Mar 28, 2024)
1c69991  Merge branch 'select_use_old_calibrations' into select_sacess_minimiz… (dilpath, Apr 20, 2024)
3859aef  Merge branch 'develop' into select_use_old_calibrations (Doresic, Aug 15, 2024)
aa6f481  change Iterable import (Doresic, Aug 15, 2024)
e6b5ead  Merge remote-tracking branch 'origin/select_use_old_calibrations' int… (Doresic, Aug 15, 2024)
01802e2  update method for next petab_select version (dilpath, Oct 7, 2024)
f221448  update for petab select (dilpath, Oct 7, 2024)
bdfd18e  Merge branch 'select_use_old_calibrations' into select_sacess_minimiz… (dilpath, Nov 6, 2024)
ffc46cb  Merge branch 'develop' into select_use_old_calibrations (dilpath, Nov 6, 2024)
88b01e5  Merge branch 'select_use_old_calibrations' into select_sacess_minimiz… (dilpath, Nov 6, 2024)
62a0f27  Merge branch 'develop' into select_use_old_calibrations (dilpath, Nov 11, 2024)
0fdfae0  update for next petab-select version (dilpath, Nov 11, 2024)
7488194  Merge branch 'select_use_old_calibrations' into select_sacess_minimiz… (dilpath, Nov 11, 2024)
6ea2b45  fix tmpdir (dilpath, Nov 12, 2024)
b24f4e8  handle no user tmpdir (dilpath, Nov 12, 2024)
f56d744  test SacessMinimizeMethod partially (dilpath, Nov 12, 2024)
d86d5d8  include fides dependency in select tests (dilpath, Nov 12, 2024)
65176dd  Update pypesto/select/misc.py (dilpath, Nov 12, 2024)
2de945e  Merge branch 'develop' into select_sacess_minimize_method (dilpath, Nov 12, 2024)
2 changes: 1 addition & 1 deletion pypesto/select/__init__.py
@@ -7,7 +7,7 @@
"""

from . import postprocessors
from .misc import model_to_pypesto_problem
from .misc import SacessMinimizeMethod, model_to_pypesto_problem
from .problem import Problem

try:
80 changes: 51 additions & 29 deletions pypesto/select/method.py
@@ -8,6 +8,10 @@
import numpy as np
import petab_select
from petab_select import (
CANDIDATE_SPACE,
MODELS,
PREDECESSOR_MODEL,
UNCALIBRATED_MODELS,
VIRTUAL_INITIAL_MODEL,
CandidateSpace,
Criterion,
@@ -213,6 +217,11 @@ class MethodCaller:
Specify the predecessor (initial) model for the model selection
algorithm. If ``None``, then the algorithm will generate an initial
predecessor model if required.
user_calibrated_models:
Supply calibration results for models yourself, as a list of models.
If a model with the same hash is encountered in the current model
selection run, and the user-supplied calibrated model has the
`criterion` value set, the model will not be calibrated again.
select_first_improvement:
If ``True``, model selection will terminate as soon as a better model
is found. If `False`, all candidate models will be tested.
@@ -245,6 +254,7 @@ def __init__(
# TODO deprecated
model_to_pypesto_problem_method: Callable[[Any], Problem] = None,
model_problem_options: dict = None,
user_calibrated_models: list[Model] = None,
):
"""Arguments are used in every `__call__`, unless overridden."""
self.petab_select_problem = petab_select_problem
@@ -256,6 +266,12 @@ def __init__(
self.select_first_improvement = select_first_improvement
self.startpoint_latest_mle = startpoint_latest_mle

self.user_calibrated_models = {}
if user_calibrated_models is not None:
self.user_calibrated_models = {
model.get_hash(): model for model in user_calibrated_models
}

self.logger = MethodLogger()

# TODO deprecated
@@ -335,10 +351,7 @@ def __init__(
# May have changed from `None` to `petab_select.VIRTUAL_INITIAL_MODEL`
self.predecessor_model = self.candidate_space.get_predecessor_model()

def __call__(
self,
newly_calibrated_models: Optional[dict[str, Model]] = None,
) -> tuple[list[Model], dict[str, Model]]:
def __call__(self) -> tuple[list[Model], dict[str, Model]]:
"""Run a single iteration of the model selection method.

A single iteration here refers to calibration of all candidate models.
@@ -347,14 +360,6 @@ def __call__(
of all models that have both: the same 3 estimated parameters; and 1
additional estimated parameter.

The input `newly_calibrated_models` is from the previous iteration. The
output `newly_calibrated_models` is from the current iteration.

Parameters
----------
newly_calibrated_models:
The newly calibrated models from the previous iteration.

Returns
-------
A 2-tuple, with the following values:
@@ -366,39 +371,56 @@ def __call__(
# All calibrated models in this iteration (see second return value).
self.logger.new_selection()

candidate_space = petab_select.ui.candidates(
iteration = petab_select.ui.start_iteration(
problem=self.petab_select_problem,
candidate_space=self.candidate_space,
limit=self.limit,
calibrated_models=self.calibrated_models,
newly_calibrated_models=newly_calibrated_models,
excluded_model_hashes=self.calibrated_models.keys(),
criterion=self.criterion,
user_calibrated_models=self.user_calibrated_models,
)
predecessor_model = self.candidate_space.predecessor_model

if not candidate_space.models:
if not iteration[UNCALIBRATED_MODELS]:
raise StopIteration("No valid models found.")

# TODO parallelize calibration (maybe not sensible if
# `self.select_first_improvement`)
newly_calibrated_models = {}
for candidate_model in candidate_space.models:
# autoruns calibration
self.new_model_problem(model=candidate_model)
newly_calibrated_models[
candidate_model.get_hash()
] = candidate_model
calibrated_models = {}
for model in iteration[UNCALIBRATED_MODELS]:
if (
model.get_criterion(
criterion=self.criterion,
compute=True,
raise_on_failure=False,
)
is not None
):
self.logger.log(
message=(
"Unexpected calibration result already available for "
f"model: `{model.get_hash()}`. Skipping "
"calibration."
),
level="warning",
)
else:
self.new_model_problem(model=model)

calibrated_models[model.get_hash()] = model
method_signal = self.handle_calibrated_model(
model=candidate_model,
predecessor_model=predecessor_model,
model=model,
predecessor_model=iteration[PREDECESSOR_MODEL],
)
if method_signal.proceed == MethodSignalProceed.STOP:
break

self.calibrated_models.update(newly_calibrated_models)
iteration_results = petab_select.ui.end_iteration(
candidate_space=iteration[CANDIDATE_SPACE],
calibrated_models=calibrated_models,
)

self.calibrated_models.update(iteration_results[MODELS])

return predecessor_model, newly_calibrated_models
return iteration[PREDECESSOR_MODEL], iteration_results[MODELS]

def handle_calibrated_model(
self,
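The new `user_calibrated_models` argument lets results from an earlier run be reused instead of re-calibrating the same models. Below is a minimal sketch of how it might be used; it assumes the keyword is forwarded from `pypesto.select.Problem.select` to `MethodCaller` (consistent with the `problem.py` changes further down), and all file names are placeholders.

```python
import petab_select
from petab_select import Criterion, Method, Model

import pypesto.select

# Placeholder file names for an existing model selection problem.
petab_select_problem = petab_select.Problem.from_yaml(
    "petab_select_problem.yaml"
)
pypesto_select_problem = pypesto.select.Problem(
    petab_select_problem=petab_select_problem
)

# Models calibrated in an earlier run. A model is only skipped if its
# criterion value is already set; otherwise it is calibrated as usual.
user_calibrated_models = [
    Model.from_yaml("calibrated_model_1.yaml"),
    Model.from_yaml("calibrated_model_2.yaml"),
]

selection_result = pypesto_select_problem.select(
    method=Method.FORWARD,
    criterion=Criterion.AIC,
    user_calibrated_models=user_calibrated_models,
)
```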
82 changes: 82 additions & 0 deletions pypesto/select/misc.py
@@ -2,6 +2,7 @@

import logging
from collections.abc import Iterable
from pathlib import Path

import pandas as pd
import petab.v1 as petab
@@ -10,7 +11,13 @@
from petab_select import Model, parameter_string_to_value
from petab_select.constants import PETAB_PROBLEM

from ..history import Hdf5History
from ..objective import Objective
from ..optimize import Optimizer
from ..optimize.ess import (
SacessOptimizer,
get_default_ess_options,
)
from ..petab import PetabImporter
from ..problem import Problem

@@ -164,3 +171,78 @@ def correct_x_guesses(
corrected_x_guess.append(corrected_value)
corrected_x_guesses.append(corrected_x_guess)
return corrected_x_guesses


class SacessMinimizeMethod:
"""Create a minimize method for SaCeSS that adapts to each problem.

When a pyPESTO SaCeSS optimizer is created, it takes the problem
dimension as input. Hence, an optimizer needs to be constructed for
each problem. Objects of this class act like a minimize method for model
selection, but a new problem-specific SaCeSS optimizer will be created
every time a model is minimized.

Instance attributes correspond to pyPESTO's SaCeSS optimizer, and are
documented there. Extra keyword arguments supplied to the constructor
will be passed on to the constructor of the SaCeSS optimizer, for example,
`max_walltime_s` can be specified in this way. If specified, `tmpdir` will
be treated as a parent directory.
"""

def __init__(
self,
num_workers: int,
local_optimizer: Optimizer = None,
tmpdir: str | Path | None = None,
save_history: bool = False,
**optimizer_kwargs,
):
"""Construct a minimize-like object."""
self.num_workers = num_workers
self.local_optimizer = local_optimizer
self.optimizer_kwargs = optimizer_kwargs
self.save_history = save_history

self.tmpdir = tmpdir
if self.tmpdir is not None:
self.tmpdir = Path(self.tmpdir)

if self.save_history and self.tmpdir is None:
self.tmpdir = Path.cwd() / "sacess_tmpdir"

def __call__(self, problem: Problem, model_hash: str, **minimize_options):
"""Create then run a problem-specific sacess optimizer."""
# create optimizer
ess_init_args = get_default_ess_options(
num_workers=self.num_workers,
dim=problem.dim,
)
for x in ess_init_args:
x["local_optimizer"] = self.local_optimizer
model_tmpdir = None
if self.tmpdir is not None:
model_tmpdir = self.tmpdir / model_hash
model_tmpdir.mkdir(exist_ok=False, parents=True)

ess = SacessOptimizer(
ess_init_args=ess_init_args,
tmpdir=model_tmpdir,
**self.optimizer_kwargs,
)

# optimize
result = ess.minimize(
problem=problem,
**minimize_options,
)

if self.save_history:
history_dir = model_tmpdir / "history"
history_dir.mkdir(exist_ok=False, parents=True)
for history_index, history in enumerate(ess.histories):
Hdf5History.from_history(
other=history,
file=history_dir / (str(history_index) + ".hdf5"),
id_=history_index,
)
return result
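A hedged usage sketch for `SacessMinimizeMethod`, continuing from the `pypesto_select_problem` set up in the previous sketch. The `model_problem_options={"minimize_method": ...}` wiring is an assumption based on the `ModelProblem` dispatch shown below, and `max_walltime_s` stands in for any keyword argument accepted by the `SacessOptimizer` constructor.

```python
from petab_select import Criterion, Method

from pypesto.optimize import FidesOptimizer
from pypesto.select import SacessMinimizeMethod

# One SaCeSS optimizer is constructed per model, sized to that model's
# problem dimension. Extra keyword arguments (here `max_walltime_s`) are
# forwarded to the `SacessOptimizer` constructor.
minimize_method = SacessMinimizeMethod(
    num_workers=4,
    local_optimizer=FidesOptimizer(),
    max_walltime_s=60,
    # Treated as a parent directory: each model gets `<tmpdir>/<model_hash>`,
    # with histories saved to `<tmpdir>/<model_hash>/history/*.hdf5`.
    tmpdir="sacess_model_selection",
    save_history=True,
)

# Assumed wiring: `model_problem_options` reaches `ModelProblem`, which
# detects the `SacessMinimizeMethod` and also passes the model hash.
selection_result = pypesto_select_problem.select(
    method=Method.FORWARD,
    criterion=Criterion.AIC,
    model_problem_options={"minimize_method": minimize_method},
)
```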
17 changes: 15 additions & 2 deletions pypesto/select/model_problem.py
@@ -9,7 +9,7 @@
from ..optimize import minimize
from ..problem import Problem
from ..result import OptimizerResult, Result
from .misc import model_to_pypesto_problem
from .misc import SacessMinimizeMethod, model_to_pypesto_problem

OBJECTIVE_CUSTOMIZER_TYPE = Callable[[ObjectiveBase], None]
TYPE_POSTPROCESSOR = Callable[["ModelProblem"], None] # noqa: F821
@@ -142,9 +142,16 @@ def __init__(
def minimize(self) -> Result:
"""Optimize the model.

Returns:
Returns
-------
The optimization result.
"""
if isinstance(self.minimize_method, SacessMinimizeMethod):
return self.minimize_method(
self.pypesto_problem,
model_hash=self.model.get_hash(),
**self.minimize_options,
)
return self.minimize_method(
self.pypesto_problem,
**self.minimize_options,
@@ -195,6 +202,12 @@ def create_fake_pypesto_result_from_fval(
----------
fval:
The objective function value.
evaluation_time:
CPU time taken to compute the objective function value.

Returns
-------
The dummy result.
"""
result = Result()

40 changes: 10 additions & 30 deletions pypesto/select/problem.py
@@ -156,16 +156,13 @@ def select(
self.handle_select_kwargs(kwargs)
# TODO handle bidirectional
method_caller = self.create_method_caller(**kwargs)
previous_best_model, newly_calibrated_models = method_caller(
# TODO add predecessor model to state
newly_calibrated_models=self.newly_calibrated_models,
)
previous_best_model, newly_calibrated_models = method_caller()

self.update_with_newly_calibrated_models(
newly_calibrated_models=newly_calibrated_models,
)

best_model = petab_select.ui.best(
best_model = petab_select.ui.get_best(
problem=self.petab_select_problem,
models=self.newly_calibrated_models.values(),
criterion=method_caller.criterion,
@@ -198,9 +195,7 @@ def select_to_completion(

while True:
try:
previous_best_model, newly_calibrated_models = method_caller(
newly_calibrated_models=self.newly_calibrated_models,
)
previous_best_model, newly_calibrated_models = method_caller()
self.update_with_newly_calibrated_models(
newly_calibrated_models=newly_calibrated_models,
)
@@ -247,33 +242,18 @@ def multistart_select(
"""
self.handle_select_kwargs(kwargs)
model_lists = []
newly_calibrated_models_list = [
self.newly_calibrated_models for _ in predecessor_models
]

method_caller = self.create_method_caller(**kwargs)
for start_index, predecessor_model in enumerate(predecessor_models):
method_caller.candidate_space.previous_predecessor_model = (
predecessor_model
)
(
best_model,
newly_calibrated_models_list[start_index],
) = method_caller(
newly_calibrated_models=newly_calibrated_models_list[
start_index
],
)
self.calibrated_models.update(
newly_calibrated_models_list[start_index]
for predecessor_model in predecessor_models:
method_caller = self.create_method_caller(
**(kwargs | {"predecessor_model": predecessor_model})
)
(best_model, models) = method_caller()
self.calibrated_models |= models

model_lists.append(
newly_calibrated_models_list[start_index].values()
)
model_lists.append(list(models.values()))
method_caller.candidate_space.reset()

best_model = petab_select.ui.best(
best_model = petab_select.ui.get_best(
problem=method_caller.petab_select_problem,
models=[model for models in model_lists for model in models],
criterion=method_caller.criterion,
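For the reworked multistart loop, a sketch of a multistart selection call under the same assumptions as the previous sketches. The `multistart_select` keyword and its return value (best model plus one model list per start) are inferred from the code above, and the predecessor-model file names are placeholders.

```python
from petab_select import Criterion, Method, Model

# Each predecessor model now gets its own MethodCaller; the calibrated
# models from all starts are pooled before `petab_select.ui.get_best`
# picks the overall best model.
predecessor_models = [
    Model.from_yaml("predecessor_small.yaml"),
    Model.from_yaml("predecessor_large.yaml"),
]

best_model, model_lists = pypesto_select_problem.multistart_select(
    method=Method.FORWARD,
    criterion=Criterion.AIC,
    predecessor_models=predecessor_models,
)
```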
6 changes: 4 additions & 2 deletions setup.cfg
@@ -163,10 +163,12 @@ example =
ipywidgets >= 8.1.5
benchmark_models_petab @ git+https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git@master#subdirectory=src/python
select =
# Remove when vis is moved to PEtab Select version
# Remove when vis is moved to PEtab Select
networkx >= 2.5.1
# End remove
petab-select >= 0.1.12
#petab-select >= 0.1.12
# FIXME before merge
petab-select @ git+https://github.com/PEtab-dev/petab_select.git@develop
test =
pytest >= 5.4.3
pytest-cov >= 2.10.0