diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7d10fd310..fe396968f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -42,6 +42,7 @@ /pypesto/select/ @dilpath /pypesto/startpoint/ @PaulJonasJost /pypesto/store/ @PaulJonasJost +/pypesto/visualize/ @stephanmg # Tests /test/base/ @PaulJonasJost @vwiela diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4db56ab55..f55f8f6b5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -157,7 +157,7 @@ jobs: - name: Run tests timeout-minutes: 35 - run: tox -e petab + run: tox -e petab && tox e -e petab -- pip uninstall -y amici env: CC: clang CXX: clang++ diff --git a/.github/workflows/publish_dockerhub.yml b/.github/workflows/publish_dockerhub.yml new file mode 100644 index 000000000..4b05ad1fc --- /dev/null +++ b/.github/workflows/publish_dockerhub.yml @@ -0,0 +1,32 @@ +name: Build and Push Docker Image + +on: + push: + branches: + - main + paths: + - 'docker/**' + - '.github/workflows/publish_dockerhub.yml' + workflow_dispatch: + +jobs: + build-and-push: + runs-on: ubuntu-latest + + steps: + - name: Check out the repository + uses: actions/checkout@v4 + + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_TOKEN }} + + - name: Build and tag the Docker image + run: | + docker build -t icb_dcm/pypesto:latest -f docker/Dockerfile . 
+ + - name: Push the Docker image to Docker Hub + run: | + docker push icb_dcm/pypesto:latest diff --git a/doc/using_pypesto.bib b/doc/using_pypesto.bib index c4c85924d..40ca3247f 100644 --- a/doc/using_pypesto.bib +++ b/doc/using_pypesto.bib @@ -14,63 +14,67 @@ @Article{FalcoCoh2023 } @Article{LakrisenkoSta2023, - author = {Lakrisenko, Polina AND Stapor, Paul AND Grein, Stephan AND Paszkowski, Łukasz AND Pathirana, Dilan AND Fröhlich, Fabian AND Lines, Glenn Terje AND Weindl, Daniel AND Hasenauer, Jan}, - journal = {PLOS Computational Biology}, - title = {Efficient computation of adjoint sensitivities at steady-state in ODE models of biochemical reaction networks}, - year = {2023}, - month = {01}, - number = {1}, - pages = {1-19}, - volume = {19}, - abstract = {Dynamical models in the form of systems of ordinary differential equations have become a standard tool in systems biology. Many parameters of such models are usually unknown and have to be inferred from experimental data. Gradient-based optimization has proven to be effective for parameter estimation. However, computing gradients becomes increasingly costly for larger models, which are required for capturing the complex interactions of multiple biochemical pathways. Adjoint sensitivity analysis has been pivotal for working with such large models, but methods tailored for steady-state data are currently not available. We propose a new adjoint method for computing gradients, which is applicable if the experimental data include steady-state measurements. The method is based on a reformulation of the backward integration problem to a system of linear algebraic equations. The evaluation of the proposed method using real-world problems shows a speedup of total simulation time by a factor of up to 4.4. 
Our results demonstrate that the proposed approach can achieve a substantial improvement in computation time, in particular for large-scale models, where computational efficiency is critical.}, - creationdate = {2023-01-26T11:19:52}, - doi = {10.1371/journal.pcbi.1010783}, - publisher = {Public Library of Science}, + author = {Lakrisenko, Polina and Stapor, Paul and Grein, Stephan and Paszkowski, Łukasz and Pathirana, Dilan and Fröhlich, Fabian and Lines, Glenn Terje and Weindl, Daniel and Hasenauer, Jan}, + journal = {PLOS Computational Biology}, + title = {Efficient computation of adjoint sensitivities at steady-state in {ODE} models of biochemical reaction networks}, + year = {2023}, + month = {01}, + number = {1}, + pages = {1-19}, + volume = {19}, + abstract = {Dynamical models in the form of systems of ordinary differential equations have become a standard tool in systems biology. Many parameters of such models are usually unknown and have to be inferred from experimental data. Gradient-based optimization has proven to be effective for parameter estimation. However, computing gradients becomes increasingly costly for larger models, which are required for capturing the complex interactions of multiple biochemical pathways. Adjoint sensitivity analysis has been pivotal for working with such large models, but methods tailored for steady-state data are currently not available. We propose a new adjoint method for computing gradients, which is applicable if the experimental data include steady-state measurements. The method is based on a reformulation of the backward integration problem to a system of linear algebraic equations. The evaluation of the proposed method using real-world problems shows a speedup of total simulation time by a factor of up to 4.4. 
Our results demonstrate that the proposed approach can achieve a substantial improvement in computation time, in particular for large-scale models, where computational efficiency is critical.}, + creationdate = {2023-01-26T11:19:52}, + doi = {10.1371/journal.pcbi.1010783}, + modificationdate = {2024-11-08T18:28:07}, + publisher = {Public Library of Science}, } @Article{SchmiesterSch2021, - author = {Schmiester, Leonard AND Schälte, Yannik AND Bergmann, Frank T. AND Camba, Tacio AND Dudkin, Erika AND Egert, Janine AND Fröhlich, Fabian AND Fuhrmann, Lara AND Hauber, Adrian L. AND Kemmer, Svenja AND Lakrisenko, Polina AND Loos, Carolin AND Merkt, Simon AND Müller, Wolfgang AND Pathirana, Dilan AND Raimúndez, Elba AND Refisch, Lukas AND Rosenblatt, Marcus AND Stapor, Paul L. AND Städter, Philipp AND Wang, Dantong AND Wieland, Franz-Georg AND Banga, Julio R. AND Timmer, Jens AND Villaverde, Alejandro F. AND Sahle, Sven AND Kreutz, Clemens AND Hasenauer, Jan AND Weindl, Daniel}, - journal = {PLOS Computational Biology}, - title = {PEtab—Interoperable specification of parameter estimation problems in systems biology}, - year = {2021}, - month = {01}, - number = {1}, - pages = {1-10}, - volume = {17}, - abstract = {Author summary Parameter estimation is a common and crucial task in modeling, as many models depend on unknown parameters which need to be inferred from data. There exist various tools for tasks like model development, model simulation, optimization, or uncertainty analysis, each with different capabilities and strengths. In order to be able to easily combine tools in an interoperable manner, but also to make results accessible and reusable for other researchers, it is valuable to define parameter estimation problems in a standardized form. Here, we introduce PEtab, a parameter estimation problem definition format which integrates with established systems biology standards for model and data specification. 
As the novel format is already supported by eight software tools with hundreds of users in total, we expect it to be of great use and impact in the community, both for modeling and algorithm development.}, - creationdate = {2023-01-26T11:30:40}, - doi = {10.1371/journal.pcbi.1008646}, - publisher = {Public Library of Science}, - timestamp = {2021-01-30}, + author = {Schmiester, Leonard and Schälte, Yannik and Bergmann, Frank T. and Camba, Tacio and Dudkin, Erika and Egert, Janine and Fröhlich, Fabian and Fuhrmann, Lara and Hauber, Adrian L. and Kemmer, Svenja and Lakrisenko, Polina and Loos, Carolin and Merkt, Simon and Müller, Wolfgang and Pathirana, Dilan and Raimúndez, Elba and Refisch, Lukas and Rosenblatt, Marcus and Stapor, Paul L. and Städter, Philipp and Wang, Dantong and Wieland, Franz-Georg and Banga, Julio R. and Timmer, Jens and Villaverde, Alejandro F. and Sahle, Sven and Kreutz, Clemens and Hasenauer, Jan and Weindl, Daniel}, + journal = {PLOS Computational Biology}, + title = {{PEtab}—Interoperable specification of parameter estimation problems in systems biology}, + year = {2021}, + month = {01}, + number = {1}, + pages = {1-10}, + volume = {17}, + abstract = {Author summary Parameter estimation is a common and crucial task in modeling, as many models depend on unknown parameters which need to be inferred from data. There exist various tools for tasks like model development, model simulation, optimization, or uncertainty analysis, each with different capabilities and strengths. In order to be able to easily combine tools in an interoperable manner, but also to make results accessible and reusable for other researchers, it is valuable to define parameter estimation problems in a standardized form. Here, we introduce PEtab, a parameter estimation problem definition format which integrates with established systems biology standards for model and data specification. 
As the novel format is already supported by eight software tools with hundreds of users in total, we expect it to be of great use and impact in the community, both for modeling and algorithm development.}, + creationdate = {2023-01-26T11:30:40}, + doi = {10.1371/journal.pcbi.1008646}, + modificationdate = {2024-11-08T18:27:03}, + publisher = {Public Library of Science}, + timestamp = {2021-01-30}, } @Article{MishraWan2023, - author = {Shekhar Mishra and Ziyu Wang and Michael J. Volk and Huimin Zhao}, - journal = {Metabolic Engineering}, - title = {Design and application of a kinetic model of lipid metabolism in Saccharomyces cerevisiae}, - year = {2023}, - issn = {1096-7176}, - pages = {12-18}, - volume = {75}, - abstract = {Lipid biosynthesis plays a vital role in living cells and has been increasingly engineered to overproduce various lipid-based chemicals. However, owing to the tightly constrained and interconnected nature of lipid biosynthesis, both understanding and engineering of lipid metabolism remain challenging, even with the help of mathematical models. Here we report the development of a kinetic metabolic model of lipid metabolism in Saccharomyces cerevisiae that integrates fatty acid biosynthesis, glycerophospholipid metabolism, sphingolipid metabolism, storage lipids, lumped sterol synthesis, and the synthesis and transport of relevant target-chemicals, such as fatty acids and fatty alcohols. The model was trained on lipidomic data of a reference S. cerevisiae strain, single knockout mutants, and lipid overproduction strains reported in literature. The model was used to design mutants for fatty alcohol overproduction and the lipidomic analysis of the resultant mutant strains coupled with model-guided hypothesis led to discovery of a futile cycle in the triacylglycerol biosynthesis pathway. In addition, the model was used to explain successful and unsuccessful mutant designs in metabolic engineering literature. 
Thus, this kinetic model of lipid metabolism can not only enable the discovery of new phenomenon in lipid metabolism but also the engineering of mutant strains for overproduction of lipids.}, - creationdate = {2023-01-26T11:31:17}, - doi = {https://doi.org/10.1016/j.ymben.2022.11.003}, - keywords = {Lipid metabolism, Kinetic model, Free fatty acid, Fatty alcohol}, + author = {Shekhar Mishra and Ziyu Wang and Michael J. Volk and Huimin Zhao}, + journal = {Metabolic Engineering}, + title = {Design and application of a kinetic model of lipid metabolism in Saccharomyces cerevisiae}, + year = {2023}, + issn = {1096-7176}, + pages = {12-18}, + volume = {75}, + abstract = {Lipid biosynthesis plays a vital role in living cells and has been increasingly engineered to overproduce various lipid-based chemicals. However, owing to the tightly constrained and interconnected nature of lipid biosynthesis, both understanding and engineering of lipid metabolism remain challenging, even with the help of mathematical models. Here we report the development of a kinetic metabolic model of lipid metabolism in Saccharomyces cerevisiae that integrates fatty acid biosynthesis, glycerophospholipid metabolism, sphingolipid metabolism, storage lipids, lumped sterol synthesis, and the synthesis and transport of relevant target-chemicals, such as fatty acids and fatty alcohols. The model was trained on lipidomic data of a reference S. cerevisiae strain, single knockout mutants, and lipid overproduction strains reported in literature. The model was used to design mutants for fatty alcohol overproduction and the lipidomic analysis of the resultant mutant strains coupled with model-guided hypothesis led to discovery of a futile cycle in the triacylglycerol biosynthesis pathway. In addition, the model was used to explain successful and unsuccessful mutant designs in metabolic engineering literature. 
Thus, this kinetic model of lipid metabolism can not only enable the discovery of new phenomenon in lipid metabolism but also the engineering of mutant strains for overproduction of lipids.}, + creationdate = {2023-01-26T11:31:17}, + doi = {10.1016/j.ymben.2022.11.003}, + keywords = {Lipid metabolism, Kinetic model, Free fatty acid, Fatty alcohol}, + modificationdate = {2024-11-08T18:25:04}, } @Article{FroehlichSor2022, - author = {Fröhlich, Fabian AND Sorger, Peter K.}, - journal = {PLOS Computational Biology}, - title = {Fides: Reliable trust-region optimization for parameter estimation of ordinary differential equation models}, - year = {2022}, - month = {07}, - number = {7}, - pages = {1-28}, - volume = {18}, - abstract = {Ordinary differential equation (ODE) models are widely used to study biochemical reactions in cellular networks since they effectively describe the temporal evolution of these networks using mass action kinetics. The parameters of these models are rarely known a priori and must instead be estimated by calibration using experimental data. Optimization-based calibration of ODE models on is often challenging, even for low-dimensional problems. Multiple hypotheses have been advanced to explain why biochemical model calibration is challenging, including non-identifiability of model parameters, but there are few comprehensive studies that test these hypotheses, likely because tools for performing such studies are also lacking. Nonetheless, reliable model calibration is essential for uncertainty analysis, model comparison, and biological interpretation. We implemented an established trust-region method as a modular Python framework (fides) to enable systematic comparison of different approaches to ODE model calibration involving a variety of Hessian approximation schemes. We evaluated fides on a recently developed corpus of biologically realistic benchmark problems for which real experimental data are available. 
Unexpectedly, we observed high variability in optimizer performance among different implementations of the same mathematical instructions (algorithms). Analysis of possible sources of poor optimizer performance identified limitations in the widely used Gauss-Newton, BFGS and SR1 Hessian approximation schemes. We addressed these drawbacks with a novel hybrid Hessian approximation scheme that enhances optimizer performance and outperforms existing hybrid approaches. When applied to the corpus of test models, we found that fides was on average more reliable and efficient than existing methods using a variety of criteria. We expect fides to be broadly useful for ODE constrained optimization problems in biochemical models and to be a foundation for future methods development.}, - creationdate = {2023-01-26T11:31:44}, - doi = {10.1371/journal.pcbi.1010322}, - publisher = {Public Library of Science}, + author = {Fröhlich, Fabian and Sorger, Peter K.}, + journal = {PLOS Computational Biology}, + title = {Fides: Reliable trust-region optimization for parameter estimation of ordinary differential equation models}, + year = {2022}, + month = {07}, + number = {7}, + pages = {1-28}, + volume = {18}, + abstract = {Ordinary differential equation (ODE) models are widely used to study biochemical reactions in cellular networks since they effectively describe the temporal evolution of these networks using mass action kinetics. The parameters of these models are rarely known a priori and must instead be estimated by calibration using experimental data. Optimization-based calibration of ODE models on is often challenging, even for low-dimensional problems. Multiple hypotheses have been advanced to explain why biochemical model calibration is challenging, including non-identifiability of model parameters, but there are few comprehensive studies that test these hypotheses, likely because tools for performing such studies are also lacking. 
Nonetheless, reliable model calibration is essential for uncertainty analysis, model comparison, and biological interpretation. We implemented an established trust-region method as a modular Python framework (fides) to enable systematic comparison of different approaches to ODE model calibration involving a variety of Hessian approximation schemes. We evaluated fides on a recently developed corpus of biologically realistic benchmark problems for which real experimental data are available. Unexpectedly, we observed high variability in optimizer performance among different implementations of the same mathematical instructions (algorithms). Analysis of possible sources of poor optimizer performance identified limitations in the widely used Gauss-Newton, BFGS and SR1 Hessian approximation schemes. We addressed these drawbacks with a novel hybrid Hessian approximation scheme that enhances optimizer performance and outperforms existing hybrid approaches. When applied to the corpus of test models, we found that fides was on average more reliable and efficient than existing methods using a variety of criteria. 
We expect fides to be broadly useful for ODE constrained optimization problems in biochemical models and to be a foundation for future methods development.}, + creationdate = {2023-01-26T11:31:44}, + doi = {10.1371/journal.pcbi.1010322}, + modificationdate = {2024-11-08T18:20:34}, + publisher = {Public Library of Science}, } @Article{FroehlichGer2022, @@ -242,7 +246,7 @@ @Article{ArrudaSch2023 @Article{MerktAli2024, author = {Merkt, Simon and Ali, Solomon and Gudina, Esayas Kebede and Adissu, Wondimagegn and Gize, Addisu and Muenchhoff, Maximilian and Graf, Alexander and Krebs, Stefan and Elsbernd, Kira and Kisch, Rebecca and Betizazu, Sisay Sirgu and Fantahun, Bereket and Bekele, Delayehu and Rubio-Acero, Raquel and Gashaw, Mulatu and Girma, Eyob and Yilma, Daniel and Zeynudin, Ahmed and Paunovic, Ivana and Hoelscher, Michael and Blum, Helmut and Hasenauer, Jan and Kroidl, Arne and Wieser, Andreas}, journal = {Nature Communications}, - title = {Long-term monitoring of SARS-CoV-2 seroprevalence and variants in Ethiopia provides prediction for immunity and cross-immunity}, + title = {Long-term monitoring of {SARS-CoV-2} seroprevalence and variants in {Ethiopia} provides prediction for immunity and cross-immunity}, year = {2024}, issn = {2041-1723}, month = apr, @@ -250,7 +254,7 @@ @Article{MerktAli2024 volume = {15}, creationdate = {2024-04-29T08:32:16}, doi = {10.1038/s41467-024-47556-2}, - modificationdate = {2024-04-29T08:32:16}, + modificationdate = {2024-11-08T18:27:48}, publisher = {Springer Science and Business Media LLC}, } @@ -282,29 +286,18 @@ @Article{HoepflAlb2024 modificationdate = {2024-05-16T07:58:55}, } -@Misc{LakrisenkoPat2024, - author = {Polina Lakrisenko and Dilan Pathirana and Daniel Weindl and Jan Hasenauer}, - title = {Exploration of methods for computing sensitivities in ODE models at dynamic and steady states}, - year = {2024}, - archiveprefix = {arXiv}, - creationdate = {2024-05-30T09:47:51}, - eprint = {2405.16524}, - modificationdate = 
{2024-05-30T09:47:51}, - primaryclass = {q-bio.QM}, -} - @Article{PhilippsKoe2024, author = {Maren Philipps and Antonia Körner and Jakob Vanhoefer and Dilan Pathirana and Jan Hasenauer}, + journal = {IFAC-PapersOnLine}, title = {Non-Negative Universal Differential Equations With Applications in Systems Biology}, year = {2024}, - journal = {IFAC-PapersOnLine}, - volume = {58}, + issn = {2405-8963}, number = {23}, pages = {25-30}, - issn = {2405-8963}, - doi = {https://doi.org/10.1016/j.ifacol.2024.10.005}, - url = {https://www.sciencedirect.com/science/article/pii/S2405896324017518}, - abstract = {Universal differential equations (UDEs) leverage the respective advantages of mechanistic models and artificial neural networks and combine them into one dynamic model. However, these hybrid models can suffer from unrealistic solutions, such as negative values for biochemical quantities. We present non-negative UDE (nUDEs), a constrained UDE variant that guarantees non-negative values. Furthermore, we explore regularisation techniques to improve generalisation and interpretability of UDEs.} + volume = {58}, + abstract = {Universal differential equations (UDEs) leverage the respective advantages of mechanistic models and artificial neural networks and combine them into one dynamic model. However, these hybrid models can suffer from unrealistic solutions, such as negative values for biochemical quantities. We present non-negative UDE (nUDEs), a constrained UDE variant that guarantees non-negative values. 
Furthermore, we explore regularisation techniques to improve generalisation and interpretability of UDEs.}, + doi = {10.1016/j.ifacol.2024.10.005}, + modificationdate = {2024-11-08T18:25:20}, } @Article{SchmiesterBra2024, @@ -319,8 +312,7 @@ @Article{SchmiesterBra2024 creationdate = {2024-08-01T09:44:04}, doi = {10.1158/1078-0432.CCR-24-0244}, eprint = {https://aacrjournals.org/clincancerres/article-pdf/doi/10.1158/1078-0432.CCR-24-0244/3478451/ccr-24-0244.pdf}, - modificationdate = {2024-08-01T09:44:04}, - url = {https://doi.org/10.1158/1078-0432.CCR-24-0244}, + modificationdate = {2024-11-08T18:17:14}, } @InProceedings{JacksonCha2023, @@ -339,4 +331,35 @@ @InProceedings{JacksonCha2023 modificationdate = {2024-09-06T15:49:47}, } +@Article{ArmisteadHoe2024, + author = {Armistead, Joy and Höpfl, Sebastian and Goldhausen, Pierre and Müller-Hartmann, Andrea and Fahle, Evelin and Hatzold, Julia and Franzen, Rainer and Brodesser, Susanne and Radde, Nicole E. and Hammerschmidt, Matthias}, + journal = {Cell Death & Disease}, + title = {A sphingolipid rheostat controls apoptosis versus apical cell extrusion as alternative tumour-suppressive mechanisms}, + year = {2024}, + issn = {2041-4889}, + month = oct, + number = {10}, + volume = {15}, + creationdate = {2024-10-17T16:30:11}, + doi = {10.1038/s41419-024-07134-2}, + modificationdate = {2024-10-17T16:30:11}, + publisher = {Springer Science and Business Media LLC}, +} + +@Article{LakrisenkoPat2024, + author = {Lakrisenko, Polina and Pathirana, Dilan and Weindl, Daniel and Hasenauer, Jan}, + journal = {PLOS ONE}, + title = {Benchmarking methods for computing local sensitivities in ordinary differential equation models at dynamic and steady states}, + year = {2024}, + month = {10}, + number = {10}, + pages = {1-19}, + volume = {19}, + abstract = {Estimating parameters of dynamic models from experimental data is a challenging, and often computationally-demanding task. 
It requires a large number of model simulations and objective function gradient computations, if gradient-based optimization is used. In many cases, steady-state computation is a part of model simulation, either due to steady-state data or an assumption that the system is at steady state at the initial time point. Various methods are available for steady-state and gradient computation. Yet, the most efficient pair of methods (one for steady states, one for gradients) for a particular model is often not clear. In order to facilitate the selection of methods, we explore six method pairs for computing the steady state and sensitivities at steady state using six real-world problems. The method pairs involve numerical integration or Newton’s method to compute the steady-state, and—for both forward and adjoint sensitivity analysis—numerical integration or a tailored method to compute the sensitivities at steady-state. Our evaluation shows that all method pairs provide accurate steady-state and gradient values, and that the two method pairs that combine numerical integration for the steady-state with a tailored method for the sensitivities at steady-state were the most robust, and amongst the most computationally-efficient. We also observed that while Newton’s method for steady-state computation yields a substantial speedup compared to numerical integration, it may lead to a large number of simulation failures. Overall, our study provides a concise overview across current methods for computing sensitivities at steady state. 
While our study shows that there is no universally-best method pair, it also provides guidance to modelers in choosing the right methods for a problem at hand.}, + creationdate = {2024-11-08T18:16:35}, + doi = {10.1371/journal.pone.0312148}, + modificationdate = {2024-11-08T18:17:06}, + publisher = {Public Library of Science}, +} + @Comment{jabref-meta: databaseType:bibtex;} diff --git a/pypesto/optimize/ess/ess.py b/pypesto/optimize/ess/ess.py index 2a86e18d9..cdcab2f12 100644 --- a/pypesto/optimize/ess/ess.py +++ b/pypesto/optimize/ess/ess.py @@ -37,6 +37,8 @@ class ESSExitFlag(int, enum.Enum): MAX_EVAL = -2 # Exited after exhausting wall-time budget MAX_TIME = -3 + # Termination for a reason other than the regular exit criteria + ERROR = -99 class OptimizerFactory(Protocol): @@ -401,7 +403,6 @@ def _create_result(self) -> pypesto.Result: for i, optimizer_result in enumerate(self.local_solutions): i_result += 1 optimizer_result.id = f"Local solution {i}" - optimizer_result.optimizer = str(self.local_optimizer) result.optimize_result.append(optimizer_result) if self._result_includes_refset: diff --git a/pypesto/optimize/ess/sacess.py b/pypesto/optimize/ess/sacess.py index 113310f25..9340b23da 100644 --- a/pypesto/optimize/ess/sacess.py +++ b/pypesto/optimize/ess/sacess.py @@ -7,6 +7,7 @@ import multiprocessing import os import time +from contextlib import suppress from dataclasses import dataclass from math import ceil, sqrt from multiprocessing import get_context @@ -20,6 +21,7 @@ import pypesto +from ... import MemoryHistory from ...startpoint import StartpointMethod from ...store.read_from_hdf5 import read_result from ...store.save_to_hdf5 import write_result @@ -331,12 +333,18 @@ def minimize( n_eval_total = sum( worker_result.n_eval for worker_result in self.worker_results ) - logger.info( - f"{self.__class__.__name__} stopped after {walltime:3g}s " - f"and {n_eval_total} objective evaluations " - f"with global best {result.optimize_result[0].fval}." 
- ) - + if len(result.optimize_result): + logger.info( + f"{self.__class__.__name__} stopped after {walltime:3g}s " + f"and {n_eval_total} objective evaluations " + f"with global best {result.optimize_result[0].fval}." + ) + else: + logger.error( + f"{self.__class__.__name__} stopped after {walltime:3g}s " + f"and {n_eval_total} objective evaluations without producing " + "a result." + ) return result def _create_result(self, problem: Problem) -> pypesto.Result: @@ -345,25 +353,40 @@ def _create_result(self, problem: Problem) -> pypesto.Result: Creates an overall Result object from the results saved by the workers. """ # gather results from workers and delete temporary result files - result = None + result = pypesto.Result() + retry_after_sleep = True for worker_idx in range(self.num_workers): tmp_result_filename = SacessWorker.get_temp_result_filename( worker_idx, self._tmpdir ) + tmp_result = None try: tmp_result = read_result( tmp_result_filename, problem=False, optimize=True ) except FileNotFoundError: # wait and retry, maybe the file wasn't found due to some filesystem latency issues - time.sleep(5) - tmp_result = read_result( - tmp_result_filename, problem=False, optimize=True - ) + if retry_after_sleep: + time.sleep(5) + # waiting once is enough - don't wait for every worker + retry_after_sleep = False + + try: + tmp_result = read_result( + tmp_result_filename, problem=False, optimize=True + ) + except FileNotFoundError: + logger.error( + f"Worker {worker_idx} did not produce a result." + ) + continue + else: + logger.error( + f"Worker {worker_idx} did not produce a result." 
+ ) + continue - if result is None: - result = tmp_result - else: + if tmp_result: result.optimize_result.append( tmp_result.optimize_result, sort=False, @@ -375,7 +398,8 @@ def _create_result(self, problem: Problem) -> pypesto.Result: filename = SacessWorker.get_temp_result_filename( worker_idx, self._tmpdir ) - os.remove(filename) + with suppress(FileNotFoundError): + os.remove(filename) # delete tmpdir if empty try: self._tmpdir.rmdir() @@ -397,6 +421,7 @@ class SacessManager: Attributes ---------- + _dim: Dimension of the optimization problem _num_workers: Number of workers _ess_options: ESS options for each worker _best_known_fx: Best objective value encountered so far @@ -410,6 +435,7 @@ class SacessManager: _rejection_threshold: Threshold for relative objective improvements that incoming solutions have to pass to be accepted _lock: Lock for accessing shared state. + _terminate: Flag to signal termination of the SACESS run to workers _logger: A logger instance _options: Further optimizer hyperparameters. """ @@ -421,6 +447,7 @@ def __init__( dim: int, options: SacessOptions = None, ): + self._dim = dim self._options = options or SacessOptions() self._num_workers = len(ess_options) self._ess_options = [shmem_manager.dict(o) for o in ess_options] @@ -440,6 +467,7 @@ def __init__( self._worker_scores = shmem_manager.Array( "d", range(self._num_workers) ) + self._terminate = shmem_manager.Value("b", False) self._worker_comms = shmem_manager.Array("i", [0] * self._num_workers) self._lock = shmem_manager.RLock() self._logger = logging.getLogger() @@ -550,6 +578,16 @@ def submit_solution( ) self._rejections.value = 0 + def abort(self): + """Abort the SACESS run.""" + with self._lock: + self._terminate.value = True + + def aborted(self) -> bool: + """Whether this run was aborted.""" + with self._lock: + return self._terminate.value + class SacessWorker: """A SACESS worker. 
@@ -641,7 +679,7 @@ def run( ess = self._setup_ess(startpoint_method) # run ESS until exit criteria are met, but start at least one iteration - while self._keep_going() or ess.n_iter == 0: + while self._keep_going(ess) or ess.n_iter == 0: # perform one ESS iteration ess._do_iteration() @@ -667,19 +705,42 @@ def run( f"(best: {self._best_known_fx}, " f"n_eval: {ess.evaluator.n_eval})." ) - - ess.history.finalize(exitflag=ess.exit_flag.name) - worker_result = SacessWorkerResult( - x=ess.x_best, - fx=ess.fx_best, - history=ess.history, - n_eval=ess.evaluator.n_eval, - n_iter=ess.n_iter, - exit_flag=ess.exit_flag, - ) + self._finalize(ess) + + def _finalize(self, ess: ESSOptimizer = None): + """Finalize the worker.""" + # Whatever happens here, we need to put something to the queue before + # returning to avoid deadlocks. + worker_result = None + if ess is not None: + try: + ess.history.finalize(exitflag=ess.exit_flag.name) + ess._report_final() + worker_result = SacessWorkerResult( + x=ess.x_best, + fx=ess.fx_best, + history=ess.history, + n_eval=ess.evaluator.n_eval, + n_iter=ess.n_iter, + exit_flag=ess.exit_flag, + ) + except Exception as e: + self._logger.exception( + f"Worker {self._worker_idx} failed to finalize: {e}" + ) + if worker_result is None: + # Create some dummy result + worker_result = SacessWorkerResult( + x=np.full(self._manager._dim, np.nan), + fx=np.nan, + history=MemoryHistory(), + n_eval=0, + n_iter=0, + exit_flag=ESSExitFlag.ERROR, + ) self._manager._result_queue.put(worker_result) + self._logger.debug(f"Final configuration: {self._ess_kwargs}") - ess._report_final() def _setup_ess(self, startpoint_method: StartpointMethod) -> ESSOptimizer: """Run ESS.""" @@ -821,7 +882,7 @@ def replace_solution(refset: RefSet, x: np.ndarray, fx: float): fx=fx, ) - def _keep_going(self): + def _keep_going(self, ess: ESSOptimizer) -> bool: """Check exit criteria. 
Returns @@ -830,14 +891,26 @@ def _keep_going(self): """ # elapsed time if time.time() - self._start_time >= self._max_walltime_s: - self.exit_flag = ESSExitFlag.MAX_TIME + ess.exit_flag = ESSExitFlag.MAX_TIME self._logger.debug( f"Max walltime ({self._max_walltime_s}s) exceeded." ) return False - + # other reasons for termination (some worker failed, ...) + if self._manager.aborted(): + ess.exit_flag = ESSExitFlag.ERROR + self._logger.debug("Manager requested termination.") + return False return True + def abort(self): + """Send signal to abort.""" + self._logger.error(f"Worker {self._worker_idx} aborting.") + # signal to manager + self._manager.abort() + + self._finalize(None) + @staticmethod def get_temp_result_filename(worker_idx: int, tmpdir: str | Path) -> str: return str(Path(tmpdir, f"sacess-{worker_idx:02d}_tmp.h5").absolute()) @@ -853,15 +926,24 @@ def _run_worker( Helper function as entrypoint for sacess worker processes. """ - # different random seeds per process - np.random.seed((os.getpid() * int(time.time() * 1000)) % 2**32) - - # Forward log messages to the logging process - h = logging.handlers.QueueHandler(log_process_queue) - worker._logger = logging.getLogger(multiprocessing.current_process().name) - worker._logger.addHandler(h) + try: + # different random seeds per process + np.random.seed((os.getpid() * int(time.time() * 1000)) % 2**32) + + # Forward log messages to the logging process + h = logging.handlers.QueueHandler(log_process_queue) + worker._logger = logging.getLogger( + multiprocessing.current_process().name + ) + worker._logger.addHandler(h) - return worker.run(problem=problem, startpoint_method=startpoint_method) + return worker.run(problem=problem, startpoint_method=startpoint_method) + except Exception as e: + with suppress(Exception): + worker._logger.exception( + f"Worker {worker._worker_idx} failed: {e}" + ) + worker.abort() def get_default_ess_options( diff --git a/pypesto/optimize/optimizer.py b/pypesto/optimize/optimizer.py 
index b05570e73..659303aed 100644 --- a/pypesto/optimize/optimizer.py +++ b/pypesto/optimize/optimizer.py @@ -39,7 +39,7 @@ def __init__(self, optimizer: str): def hierarchical_decorator(minimize): """Add inner parameters to the optimizer result. - Default decorator for the minimize() method. + Default decorator for the :meth:`Optimizer.minimize` method. """ @wraps(minimize) @@ -81,7 +81,7 @@ def wrapped_minimize( def history_decorator(minimize): """Initialize and extract information stored in the history. - Default decorator for the minimize() method. + Default decorator for the :meth:`Optimizer.minimize` method. """ @wraps(minimize) @@ -140,7 +140,11 @@ def wrapped_minimize( logger.error(f"start {id} failed:\n{trace}") result = OptimizerResult( - x0=x0, exitflag=-1, message=str(err), id=id + x0=x0, + exitflag=-1, + message=str(err), + id=id, + optimizer=str(self), ) else: raise @@ -163,7 +167,7 @@ def wrapped_minimize( def time_decorator(minimize): """Measure time of optimization. - Default decorator for the minimize() method to take time. + Default decorator for the :meth:`Optimizer.minimize` method to take time. Currently, the method time.time() is used, which measures the wall-clock time. """ @@ -196,8 +200,8 @@ def wrapped_minimize( def fix_decorator(minimize): """Include also fixed parameters in the result arrays of minimize(). - Default decorator for the minimize() method (nans will be inserted in the - derivatives). + Default decorator for the :meth:`Optimizer.minimize` method (nans will be + inserted in the derivatives). 
""" @wraps(minimize) @@ -523,6 +527,7 @@ def fun(x): hess=getattr(res, "hess", None), exitflag=res.status, message=res.message, + optimizer=str(self), ) return optimizer_result @@ -612,7 +617,10 @@ def minimize( # the ipopt return object is a scipy.optimize.OptimizeResult return OptimizerResult( - x=ret.x, exitflag=ret.status, message=ret.message + x=ret.x, + exitflag=ret.status, + message=ret.message, + optimizer=str(self), ) def is_least_squares(self): @@ -630,7 +638,7 @@ def __init__(self, options: dict = None): if self.options is None: self.options = DlibOptimizer.get_default_options(self) elif "maxiter" not in self.options: - raise KeyError("Dlib options are missing the key word " "maxiter.") + raise KeyError("Dlib options are missing the keyword maxiter.") def __repr__(self) -> str: rep = f"<{self.__class__.__name__}" @@ -677,7 +685,7 @@ def get_fval_vararg(*x): 0.002, ) - optimizer_result = OptimizerResult() + optimizer_result = OptimizerResult(optimizer=str(self)) return optimizer_result @@ -737,7 +745,9 @@ def minimize( problem.objective.get_fval, lb, ub, **self.options ) - optimizer_result = OptimizerResult(x=np.array(xopt), fval=fopt) + optimizer_result = OptimizerResult( + x=np.array(xopt), fval=fopt, optimizer=str(self) + ) return optimizer_result @@ -821,7 +831,7 @@ def minimize( ) optimizer_result = OptimizerResult( - x=np.array(result[0]), fval=result[1] + x=np.array(result[0]), fval=result[1], optimizer=str(self) ) return optimizer_result @@ -901,7 +911,7 @@ def minimize( ) optimizer_result = OptimizerResult( - x=np.array(result.x), fval=result.fun + x=np.array(result.x), fval=result.fun, optimizer=str(self) ) return optimizer_result @@ -1019,6 +1029,7 @@ def successively_working_fval(swarm: np.ndarray) -> np.ndarray: optimizer_result = OptimizerResult( x=pos, fval=float(cost), + optimizer=str(self), ) return optimizer_result @@ -1169,7 +1180,7 @@ def __repr__(self) -> str: if self.options is not None: rep += f" options={self.options}" if 
self.local_options is not None: - rep += f" local_options={self.local_methods}" + rep += f" local_options={self.local_options}" return rep + ">" @minimize_decorator_collection @@ -1249,6 +1260,7 @@ def nlopt_objective(x, grad): fval=opt.last_optimum_value(), message=msg, exitflag=opt.last_optimize_result(), + optimizer=str(self), ) return optimizer_result @@ -1433,6 +1445,7 @@ def minimize( hess=opt.hess, message=msg, exitflag=opt.exitflag, + optimizer=str(self), ) return optimizer_result diff --git a/pypesto/optimize/task.py b/pypesto/optimize/task.py index 10ae83dd8..7482097e1 100644 --- a/pypesto/optimize/task.py +++ b/pypesto/optimize/task.py @@ -63,7 +63,6 @@ def execute(self) -> OptimizerResult: history_options=self.history_options, optimize_options=self.optimize_options, ) - optimizer_result.optimizer = str(self.optimizer) if not self.optimize_options.report_hess: optimizer_result.hess = None diff --git a/pypesto/petab/objective_creator.py b/pypesto/petab/objective_creator.py index 72f98cf03..697c0f9a3 100644 --- a/pypesto/petab/objective_creator.py +++ b/pypesto/petab/objective_creator.py @@ -7,7 +7,6 @@ import os import re import shutil -import sys import warnings from abc import ABC, abstractmethod from collections.abc import Iterable, Sequence @@ -145,10 +144,6 @@ def create_model( f"compilation: Not a folder." ) - # add module to path - if self.output_folder not in sys.path: - sys.path.insert(0, self.output_folder) - # compile if self._must_compile(force_compile): logger.info( diff --git a/pypesto/store/save_to_hdf5.py b/pypesto/store/save_to_hdf5.py index 3804bfbbb..a4ac4e703 100644 --- a/pypesto/store/save_to_hdf5.py +++ b/pypesto/store/save_to_hdf5.py @@ -1,23 +1,22 @@ """Include functions for saving various results to hdf5.""" +from __future__ import annotations import logging import os from numbers import Integral from pathlib import Path -from typing import Union import h5py import numpy as np +from .. 
import OptimizeResult, OptimizerResult from ..result import ProfilerResult, Result, SampleResult from .hdf5 import write_array, write_float_array logger = logging.getLogger(__name__) -def check_overwrite( - f: Union[h5py.File, h5py.Group], overwrite: bool, target: str -): +def check_overwrite(f: h5py.File | h5py.Group, overwrite: bool, target: str): """ Check whether target already exists. @@ -36,7 +35,7 @@ def check_overwrite( del f[target] else: raise RuntimeError( - f"File `{f.filename}` already exists and contains " + f"File `{f.file.filename}` already exists and contains " f"information about {target} result. " f"If you wish to overwrite the file, set " f"`overwrite=True`." @@ -53,7 +52,7 @@ class ProblemHDF5Writer: HDF5 result file name """ - def __init__(self, storage_filename: Union[str, Path]): + def __init__(self, storage_filename: str | Path): """ Initialize writer. @@ -106,7 +105,7 @@ class OptimizationResultHDF5Writer: HDF5 result file name """ - def __init__(self, storage_filename: Union[str, Path]): + def __init__(self, storage_filename: str | Path): """ Initialize Writer. @@ -117,32 +116,76 @@ def __init__(self, storage_filename: Union[str, Path]): """ self.storage_filename = str(storage_filename) - def write(self, result: Result, overwrite=False): - """Write HDF5 result file from pyPESTO result object.""" - # Create destination directory - if isinstance(self.storage_filename, str): - basedir = os.path.dirname(self.storage_filename) - if basedir: - os.makedirs(basedir, exist_ok=True) + def write( + self, + result: Result + | OptimizeResult + | OptimizerResult + | list[OptimizerResult], + overwrite=False, + ): + """Write HDF5 result file from pyPESTO result object. + + Parameters + ---------- + result: Result to be saved. + overwrite: Boolean, whether already existing results should be + overwritten. This applies to the whole list of results, not only to + individual results. 
See :meth:`write_optimizer_result` for + incrementally writing a sequence of `OptimizerResult`. + """ + Path(self.storage_filename).parent.mkdir(parents=True, exist_ok=True) + + if isinstance(result, Result): + results = result.optimize_result.list + elif isinstance(result, OptimizeResult): + results = result.list + elif isinstance(result, list): + results = result + elif isinstance(result, OptimizerResult): + results = [result] + else: + raise ValueError(f"Unsupported type for `result`: {type(result)}.") with h5py.File(self.storage_filename, "a") as f: check_overwrite(f, overwrite, "optimization") optimization_grp = f.require_group("optimization") - # settings = - # optimization_grp.create_dataset("settings", settings, dtype=) results_grp = optimization_grp.require_group("results") - for start in result.optimize_result.list: - start_id = start["id"] - start_grp = results_grp.require_group(start_id) - for key in start.keys(): - if key == "history": - continue - if isinstance(start[key], np.ndarray): - write_array(start_grp, key, start[key]) - elif start[key] is not None: - start_grp.attrs[key] = start[key] - f.flush() + for start in results: + self._do_write_optimizer_result(start, results_grp, overwrite) + + def write_optimizer_result( + self, result: OptimizerResult, overwrite: bool = False + ): + """Write HDF5 result file from pyPESTO result object. + + Parameters + ---------- + result: Result to be saved. 
+ overwrite: Boolean, whether already existing results with the same ID + should be overwritten.s + """ + Path(self.storage_filename).parent.mkdir(parents=True, exist_ok=True) + + with h5py.File(self.storage_filename, "a") as f: + results_grp = f.require_group("optimization/results") + self._do_write_optimizer_result(result, results_grp, overwrite) + + def _do_write_optimizer_result( + self, result: OptimizerResult, g: h5py.Group = None, overwrite=False + ): + """Write an OptimizerResult to the given group.""" + sub_group_id = result["id"] + check_overwrite(g, overwrite, sub_group_id) + start_grp = g.require_group(sub_group_id) + for key in result.keys(): + if key == "history": + continue + if isinstance(result[key], np.ndarray): + write_array(start_grp, key, result[key]) + elif result[key] is not None: + start_grp.attrs[key] = result[key] class SamplingResultHDF5Writer: @@ -155,7 +198,7 @@ class SamplingResultHDF5Writer: HDF5 result file name """ - def __init__(self, storage_filename: Union[str, Path]): + def __init__(self, storage_filename: str | Path): """ Initialize Writer. @@ -208,7 +251,7 @@ class ProfileResultHDF5Writer: HDF5 result file name """ - def __init__(self, storage_filename: Union[str, Path]): + def __init__(self, storage_filename: str | Path): """ Initialize Writer. @@ -241,7 +284,7 @@ def write(self, result: Result, overwrite: bool = False): @staticmethod def _write_profiler_result( - parameter_profile: Union[ProfilerResult, None], result_grp: h5py.Group + parameter_profile: ProfilerResult | None, result_grp: h5py.Group ) -> None: """Write a single ProfilerResult to hdf5. 
@@ -267,7 +310,7 @@ def _write_profiler_result( def write_result( result: Result, - filename: Union[str, Path], + filename: str | Path, overwrite: bool = False, problem: bool = True, optimize: bool = False, diff --git a/pytest.ini b/pytest.ini index c852c729e..82ae58578 100644 --- a/pytest.ini +++ b/pytest.ini @@ -2,3 +2,4 @@ addopts = "--doctest-modules" filterwarnings = ignore:.*inspect.getargspec\(\) is deprecated.*:DeprecationWarning +norecursedirs = amici_models diff --git a/setup.cfg b/setup.cfg index bc28ae610..d9eb70919 100644 --- a/setup.cfg +++ b/setup.cfg @@ -105,7 +105,8 @@ ipopt = dlib = dlib >= 19.19.0 nlopt = - nlopt >= 2.6.2 + # != 2.9.0: https://github.com/stevengj/nlopt/issues/575 + nlopt >= 2.6.2, != 2.9.0 pyswarm = pyswarm >= 0.6 cma = diff --git a/test/base/test_engine.py b/test/base/test_engine.py index 6db8e79c3..1481c4ca1 100644 --- a/test/base/test_engine.py +++ b/test/base/test_engine.py @@ -86,7 +86,14 @@ def test_deepcopy_objective(): ) ) factory = petab_importer.create_objective_creator() - objective = factory.create_objective() + amici_model = factory.create_model() + amici_model.setSteadyStateSensitivityMode( + amici.SteadyStateSensitivityMode.integrateIfNewtonFails + ) + amici_model.setSteadyStateComputationMode( + amici.SteadyStateComputationMode.integrateIfNewtonFails + ) + objective = factory.create_objective(model=amici_model) objective.amici_solver.setSensitivityMethod( amici.SensitivityMethod_adjoint diff --git a/test/base/test_store.py b/test/base/test_store.py index d90a8030d..840440c70 100644 --- a/test/base/test_store.py +++ b/test/base/test_store.py @@ -1,9 +1,11 @@ """Test the `pypesto.store` module.""" import os -import tempfile +from pathlib import Path +from tempfile import TemporaryDirectory import numpy as np +import pytest import scipy.optimize as so import pypesto @@ -52,7 +54,7 @@ def test_storage_opt_result(): minimize_result = create_optimization_result() - with tempfile.TemporaryDirectory(dir=".") as 
tmpdirname: + with TemporaryDirectory(dir=".") as tmpdirname: result_file_name = os.path.join(tmpdirname, "a", "b", "result.h5") opt_result_writer = OptimizationResultHDF5Writer(result_file_name) opt_result_writer.write(minimize_result) @@ -89,6 +91,27 @@ def test_storage_opt_result_update(hdf5_file): assert opt_res[key] == read_result.optimize_result[i][key] +def test_write_optimizer_results_incrementally(): + """Test writing optimizer results incrementally to the same file.""" + res = create_optimization_result() + res1, res2 = res.optimize_result.list[:2] + + with TemporaryDirectory() as tmp_dir: + result_path = Path(tmp_dir, "result.h5") + writer = OptimizationResultHDF5Writer(result_path) + writer.write_optimizer_result(res1) + writer.write_optimizer_result(res2) + reader = OptimizationResultHDF5Reader(result_path) + read_res = reader.read() + assert len(read_res.optimize_result) == 2 + + # overwriting works + writer.write_optimizer_result(res1, overwrite=True) + # overwriting attempt fails without overwrite=True + with pytest.raises(RuntimeError): + writer.write_optimizer_result(res1) + + def test_storage_problem(hdf5_file): problem = create_problem() problem_writer = ProblemHDF5Writer(hdf5_file) diff --git a/test/optimize/test_optimize.py b/test/optimize/test_optimize.py index 48ebdea55..6f8260b90 100644 --- a/test/optimize/test_optimize.py +++ b/test/optimize/test_optimize.py @@ -18,6 +18,7 @@ import pypesto import pypesto.optimize as optimize +from pypesto import Objective from pypesto.optimize.ess import ( ESSOptimizer, FunctionEvaluatorMP, @@ -308,6 +309,7 @@ def check_minimize(problem, library, solver, allow_failed_starts=False): ]: assert np.isfinite(result.optimize_result.list[0]["fval"]) assert result.optimize_result.list[0]["x"] is not None + assert result.optimize_result.list[0]["optimizer"] is not None def test_trim_results(problem): @@ -577,6 +579,40 @@ def test_ess_refset_repr(): ) +class FunctionOrError: + """Callable that raises an error every 
nth invocation.""" + + def __init__(self, fun, error_frequency=100): + self.counter = 0 + self.error_frequency = error_frequency + self.fun = fun + + def __call__(self, *args, **kwargs): + self.counter += 1 + if self.counter % self.error_frequency == 0: + raise RuntimeError("Intentional error.") + return self.fun(*args, **kwargs) + + +def test_sacess_worker_error(capsys): + """Check that SacessOptimizer does not hang if an error occurs on a worker.""" + objective = Objective( + fun=FunctionOrError(sp.optimize.rosen), grad=sp.optimize.rosen_der + ) + problem = pypesto.Problem( + objective=objective, lb=0 * np.ones((1, 2)), ub=1 * np.ones((1, 2)) + ) + sacess = SacessOptimizer( + num_workers=2, + max_walltime_s=2, + sacess_loglevel=logging.DEBUG, + ess_loglevel=logging.DEBUG, + ) + res = sacess.minimize(problem) + assert isinstance(res, pypesto.Result) + assert "Intentional error." in capsys.readouterr().err + + def test_scipy_integrated_grad(): integrated = True obj = rosen_for_sensi(max_sensi_order=2, integrated=integrated)["obj"] diff --git a/test/petab/test_amici_objective.py b/test/petab/test_amici_objective.py index 274962fc1..5ff4512b4 100644 --- a/test/petab/test_amici_objective.py +++ b/test/petab/test_amici_objective.py @@ -86,12 +86,21 @@ def test_preeq_guesses(): importer = pypesto.petab.PetabImporter.from_yaml( os.path.join(models.MODELS_DIR, model_name, model_name + ".yaml") ) - problem = importer.create_problem() - obj = problem.objective - obj.amici_solver.setNewtonMaxSteps(0) + obj_creator = importer.create_objective_creator() + amici_model = obj_creator.create_model() + amici_model.setSteadyStateComputationMode( + amici.SteadyStateComputationMode.integrateIfNewtonFails + ) + amici_model.setSteadyStateSensitivityMode( + amici.SteadyStateSensitivityMode.integrateIfNewtonFails + ) + obj = obj_creator.create_objective(model=amici_model) + problem = importer.create_problem(objective=obj) obj.amici_model.setSteadyStateSensitivityMode( 
amici.SteadyStateSensitivityMode.integrationOnly ) + obj = problem.objective + obj.amici_solver.setNewtonMaxSteps(0) obj.amici_solver.setAbsoluteTolerance(1e-12) obj.amici_solver.setRelativeTolerance(1e-12) diff --git a/test/petab/test_amici_predictor.py b/test/petab/test_amici_predictor.py index 2d23620a4..d99fa7d52 100644 --- a/test/petab/test_amici_predictor.py +++ b/test/petab/test_amici_predictor.py @@ -2,7 +2,6 @@ import os import shutil -import sys import amici import libsbml @@ -34,13 +33,10 @@ def conversion_reaction_model(): # try to import the exisiting model, if possible try: - sys.path.insert(0, os.path.abspath(model_output_dir)) model_module = amici.import_model_module(model_name, model_output_dir) model = model_module.getModel() except ValueError: # read in and adapt the sbml slightly - if os.path.abspath(model_output_dir) in sys.path: - sys.path.remove(os.path.abspath(model_output_dir)) sbml_importer = amici.SbmlImporter(sbml_file) # add observables to sbml model @@ -95,7 +91,6 @@ def create_intial_assignment(sbml_model, spec_id): ) # Importing the module and loading the model - sys.path.insert(0, os.path.abspath(model_output_dir)) model_module = amici.import_model_module(model_name, model_output_dir) model = model_module.getModel() except RuntimeError as err: @@ -107,7 +102,6 @@ def create_intial_assignment(sbml_model, spec_id): "Delete the conversion_reaction_enhanced model from your python " "path and retry. 
Your python path is currently:" ) - print(sys.path) print("Original error message:") raise err diff --git a/test/petab/test_sbml_conversion.py b/test/petab/test_sbml_conversion.py index 22653a0a6..7284fb8a7 100644 --- a/test/petab/test_sbml_conversion.py +++ b/test/petab/test_sbml_conversion.py @@ -1,6 +1,5 @@ import os import re -import sys import unittest import warnings @@ -11,8 +10,6 @@ from ..util import load_amici_objective -sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - optimizers = { "scipy": [ "Nelder-Mead", diff --git a/tox.ini b/tox.ini index e9193e0ad..d1937e4ee 100644 --- a/tox.ini +++ b/tox.ini @@ -29,7 +29,7 @@ envlist = # Base-environment [testenv] -passenv = AMICI_PARALLEL_COMPILE,CC,CXX,MPLBACKEND +passenv = AMICI_PARALLEL_COMPILE,CC,CXX,MPLBACKEND,BNGPATH # Sub-environments # inherit settings defined in the base @@ -75,10 +75,14 @@ description = Test basic functionality on Windows [testenv:petab] -extras = test,amici,petab,pyswarm,roadrunner +extras = test,petab,pyswarm,roadrunner deps = git+https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git@master\#subdirectory=src/python - git+https://github.com/AMICI-dev/amici.git@develop\#egg=amici&subdirectory=python/sdist +# always install amici from develop branch, avoid caching +# to skip re-installation, run `tox -e petab --override testenv:petab.commands_pre=` +commands_pre = + python3 -m pip uninstall -y amici + python3 -m pip install git+https://github.com/AMICI-dev/amici.git@develop\#egg=amici&subdirectory=python/sdist commands = python3 -m pip install git+https://github.com/PEtab-dev/petab_test_suite@main python3 -m pip install git+https://github.com/pysb/pysb@master