New PEtab.jl syntax (#1131)
* Altered syntax

* Adhere to new PEtab.jl 2.0 syntax

* Changed pyPESTO syntax accordingly.

* Removed version pin from PEtab.jl

* Updated bound syntax (lower_bounds/upper_bounds)

* Update doc/example/conversion_reaction/PEtabJl_module.jl
PaulJonasJost authored Oct 19, 2023
1 parent a52ae70 commit 520b718
Showing 4 changed files with 66 additions and 66 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -172,7 +172,7 @@ jobs:
run: .github/workflows/install_deps.sh

- name: Install PEtabJL dependencies
run: julia -e 'using Pkg; Pkg.add(name="PEtab", version="1.4.2"); Pkg.add("OrdinaryDiffEq"), Pkg.add("Sundials")'
run: julia -e 'using Pkg; Pkg.add("PEtab"); Pkg.add("OrdinaryDiffEq"), Pkg.add("Sundials")'

- name: Run tests
timeout-minutes: 25
14 changes: 7 additions & 7 deletions doc/example/conversion_reaction/PEtabJl_module.jl
@@ -5,15 +5,15 @@ using Sundials
using PEtab

pathYaml = "/Users/pauljonasjost/Documents/GitHub_Folders/pyPESTO/test/julia/../../doc/example/conversion_reaction/conversion_reaction.yaml"
petabModel = readPEtabModel(pathYaml, verbose=true)
petabModel = PEtabModel(pathYaml, verbose=true)

# A full list of options for createPEtabODEProblem can be found at https://sebapersson.github.io/PEtab.jl/dev/API_choosen/#PEtab.setupPEtabODEProblem
petabProblem = createPEtabODEProblem(
# A full list of options for PEtabODEProblem can be found at https://sebapersson.github.io/PEtab.jl/stable/API_choosen/
petabProblem = PEtabODEProblem(
petabModel,
odeSolverOptions=ODESolverOptions(Rodas5P(), abstol=1e-08, reltol=1e-08, maxiters=Int64(1e4)),
gradientMethod=:ForwardDiff,
hessianMethod=:ForwardDiff,
sparseJacobian=nothing,
ode_solver=ODESolver(Rodas5P(), abstol=1e-08, reltol=1e-08, maxiters=Int64(1e4)),
gradient_method=:ForwardDiff,
hessian_method=:ForwardDiff,
sparse_jacobian=nothing,
verbose=true
)
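
For context: this checked-in module mirrors what pyPESTO's PetabJlImporter writes via _write_julia_file (shown further down in this diff). A minimal sketch of the corresponding Python call, assuming the class is importable from the module path pypesto/objective/julia/petab_jl_importer.py as in this diff; the option values mirror the module above, and the YAML path is illustrative:

from pypesto.objective.julia.petab_jl_importer import PetabJlImporter

# Sketch: regenerate a module equivalent to PEtabJl_module.jl above.
importer = PetabJlImporter.from_yaml(
    "doc/example/conversion_reaction/conversion_reaction.yaml",  # illustrative path
    ode_solver_options={
        "solver": "Rodas5P",      # "()" is appended by the importer
        "abstol": 1e-8,
        "reltol": 1e-8,
        "maxiters": "Int64(1e4)",
    },
    gradient_method="ForwardDiff",
    hessian_method="ForwardDiff",
)

sparse_jacobian and verbose are left at their defaults, which the importer maps to nothing and true, matching the module above.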

16 changes: 8 additions & 8 deletions pypesto/objective/julia/petabJl.py
@@ -61,10 +61,10 @@ def __init__(
self.petab_jl_problem = petab_jl_problem

# get functions
fun = self.petab_jl_problem.computeCost
grad = self.petab_jl_problem.computeGradient
hess = self.petab_jl_problem.computeHessian
x_names = np.asarray(self.petab_jl_problem.θ_estNames)
fun = self.petab_jl_problem.compute_cost
grad = self.petab_jl_problem.compute_gradient
hess = self.petab_jl_problem.compute_hessian
x_names = np.asarray(self.petab_jl_problem.θ_names)

# call the super super super constructor
super(JuliaObjective, self).__init__(
@@ -102,10 +102,10 @@ def __setstate__(self, state):
self.petab_jl_problem = petab_jl_problem

# get functions
fun = self.petab_jl_problem.computeCost
grad = self.petab_jl_problem.computeGradient
hess = self.petab_jl_problem.computeHessian
x_names = np.asarray(self.petab_jl_problem.θ_estNames)
fun = self.petab_jl_problem.compute_cost
grad = self.petab_jl_problem.compute_gradient
hess = self.petab_jl_problem.compute_hessian
x_names = np.asarray(self.petab_jl_problem.θ_names)

# call the super super constructor
super(JuliaObjective, self).__init__(fun, grad, hess, x_names)
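
The two hunks above are a pure rename of the PEtab.jl problem fields (computeCost -> compute_cost, computeGradient -> compute_gradient, computeHessian -> compute_hessian, θ_estNames -> θ_names). Not part of this commit, but a hedged sketch of how the lookup could tolerate both spellings if older PEtab.jl environments still had to be supported, assuming missing fields surface as AttributeError:

def _get_problem_field(petab_jl_problem, new_name: str, old_name: str):
    """Hypothetical helper, not in this PR: prefer the PEtab.jl 2.0
    field name and fall back to the pre-2.0 camelCase name."""
    try:
        return getattr(petab_jl_problem, new_name)
    except AttributeError:
        return getattr(petab_jl_problem, old_name)

# usage sketch:
# fun = _get_problem_field(self.petab_jl_problem, "compute_cost", "computeCost")
# x_names = np.asarray(_get_problem_field(self.petab_jl_problem, "θ_names", "θ_estNames"))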
100 changes: 50 additions & 50 deletions pypesto/objective/julia/petab_jl_importer.py
@@ -50,10 +50,10 @@ def __init__(
@staticmethod
def from_yaml(
yaml_file: str,
odeSolverOptions: Optional[dict] = None,
gradientMethod: Optional[str] = None,
hessianMethod: Optional[str] = None,
sparseJacobian: Optional[bool] = None,
ode_solver_options: Optional[dict] = None,
gradient_method: Optional[str] = None,
hessian_method: Optional[str] = None,
sparse_jacobian: Optional[bool] = None,
verbose: Optional[bool] = None,
directory: Optional[str] = None,
) -> PetabJlImporter:
@@ -67,11 +67,11 @@ def from_yaml(
----------
yaml_file:
The yaml file of the PEtab problem
odeSolverOptions:
ode_solver_options:
Dictionary like options for the ode solver in julia
gradientMethod, hessianMethod:
gradient_method, hessian_method:
Julia methods to compute gradient and hessian
sparseJacobian:
sparse_jacobian:
Whether to compute sparse Jacobians
verbose:
Whether to have a more informative log.
@@ -81,10 +81,10 @@
"""
# get default values
options = _get_default_options(
odeSolverOptions=odeSolverOptions,
gradientMethod=gradientMethod,
hessianMethod=hessianMethod,
sparseJacobian=sparseJacobian,
ode_solver_options=ode_solver_options,
gradient_method=gradient_method,
hessian_method=hessian_method,
sparse_jacobian=sparse_jacobian,
verbose=verbose,
)

@@ -166,8 +166,8 @@ def create_problem(
multistart optimization.
"""
obj = self.create_objective(precompile=precompile)
lb = np.asarray(self.petab_jl_problem.lowerBounds)
ub = np.asarray(self.petab_jl_problem.upperBounds)
lb = np.asarray(self.petab_jl_problem.lower_bounds)
ub = np.asarray(self.petab_jl_problem.upper_bounds)

return Problem(
objective=obj,
@@ -181,10 +181,10 @@ def create_problem(


def _get_default_options(
odeSolverOptions: Union[dict, None] = None,
gradientMethod: Union[str, None] = None,
hessianMethod: Union[str, None] = None,
sparseJacobian: Union[str, None] = None,
ode_solver_options: Union[dict, None] = None,
gradient_method: Union[str, None] = None,
hessian_method: Union[str, None] = None,
sparse_jacobian: Union[str, None] = None,
verbose: Union[str, None] = None,
) -> dict:
"""
@@ -194,13 +194,13 @@ def _get_default_options(
Parameters
----------
odeSolverOptions:
ode_solver_options:
Options for the ODE solver.
gradientMethod:
gradient_method:
Method for gradient calculation.
hessianMethod:
hessian_method:
Method for hessian calculation.
sparseJacobian:
sparse_jacobian:
Whether the jacobian should be sparse.
verbose:
Whether to print verbose output.
@@ -211,51 +211,51 @@
The options.
"""
# get default values
if odeSolverOptions is None:
odeSolverOptions = {
if ode_solver_options is None:
ode_solver_options = {
"solver": "Rodas5P",
"abstol": 1e-8,
"reltol": 1e-8,
"maxiters": "Int64(1e4)",
}
if not odeSolverOptions["solver"].endswith("()"):
odeSolverOptions["solver"] += "()" # add parentheses
if gradientMethod is None:
gradientMethod = "nothing"
if hessianMethod is None:
hessianMethod = "nothing"
if sparseJacobian is None:
sparseJacobian = "nothing"
if not ode_solver_options["solver"].endswith("()"):
ode_solver_options["solver"] += "()" # add parentheses
if gradient_method is None:
gradient_method = "nothing"
if hessian_method is None:
hessian_method = "nothing"
if sparse_jacobian is None:
sparse_jacobian = "nothing"
if verbose is None:
verbose = "true"

# check values for gradientMethod and hessianMethod
# check values for gradient_method and hessian_method
allowed_gradient_methods = [
"ForwardDiff",
"ForwardEquations",
"Adjoint",
"Zygote",
]
if gradientMethod not in allowed_gradient_methods:
if gradient_method not in allowed_gradient_methods:
logger.warning(
f"gradientMethod {gradientMethod} is not in "
f"gradient_method {gradient_method} is not in "
f"{allowed_gradient_methods}. Defaulting to ForwardDiff."
)
gradientMethod = "ForwardDiff"
gradient_method = "ForwardDiff"
allowed_hessian_methods = ["ForwardDiff", "BlocForwardDiff", "GaussNewton"]
if hessianMethod not in allowed_hessian_methods:
if hessian_method not in allowed_hessian_methods:
logger.warning(
f"hessianMethod {hessianMethod} is not in "
f"hessian_method {hessian_method} is not in "
f"{allowed_hessian_methods}. Defaulting to ForwardDiff."
)
hessianMethod = "ForwardDiff"
hessian_method = "ForwardDiff"

# fill options
options = {
"odeSolverOptions": odeSolverOptions,
"gradientMethod": gradientMethod,
"hessianMethod": hessianMethod,
"sparseJacobian": sparseJacobian,
"ode_solver_options": ode_solver_options,
"gradient_method": gradient_method,
"hessian_method": hessian_method,
"sparse_jacobian": sparse_jacobian,
"verbose": verbose,
}
return options
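
For reference, a sketch of what the defaults resolve to when every argument is left as None, derived from the code above; note that None maps to the string "nothing", which is not an allowed gradient or hessian method, so both fall back to ForwardDiff with a warning:

options = _get_default_options()
# options is now approximately:
# {
#     "ode_solver_options": {"solver": "Rodas5P()", "abstol": 1e-08,
#                            "reltol": 1e-08, "maxiters": "Int64(1e4)"},
#     "gradient_method": "ForwardDiff",   # fell back from "nothing", with a warning
#     "hessian_method": "ForwardDiff",    # fell back from "nothing", with a warning
#     "sparse_jacobian": "nothing",
#     "verbose": "true",
# }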
@@ -293,7 +293,7 @@ def _write_julia_file(
"PEtab.jl/dev/API_choosen/#PEtab.setupPEtabODEProblem"
)
odeSolvOpt_str = ", ".join(
[f"{k}={v}" for k, v in options["odeSolverOptions"].items()]
[f"{k}={v}" for k, v in options["ode_solver_options"].items()]
)
# delete "solver=" from string
odeSolvOpt_str = odeSolvOpt_str.replace("solver=", "")
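
With the default options, this join-and-strip step yields the argument string that ends up inside ODESolver(...) in the generated module; a small illustration:

options = {"ode_solver_options": {"solver": "Rodas5P()", "abstol": 1e-8,
                                  "reltol": 1e-8, "maxiters": "Int64(1e4)"}}
odeSolvOpt_str = ", ".join(
    [f"{k}={v}" for k, v in options["ode_solver_options"].items()]
)
odeSolvOpt_str = odeSolvOpt_str.replace("solver=", "")
print(odeSolvOpt_str)
# Rodas5P(), abstol=1e-08, reltol=1e-08, maxiters=Int64(1e4)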
@@ -304,15 +304,15 @@
f"using Sundials\n"
f"using PEtab\n\n"
f"pathYaml = \"{yaml_file}\"\n"
f"petabModel = readPEtabModel(pathYaml, verbose=true)\n\n"
f"# A full list of options for createPEtabODEProblem can be "
f"petabModel = PEtabModel(pathYaml, verbose=true)\n\n"
f"# A full list of options for PEtabODEProblem can be "
f"found at {link_to_options}\n"
f"petabProblem = createPEtabODEProblem(\n\t"
f"petabProblem = PEtabODEProblem(\n\t"
f"petabModel,\n\t"
f"odeSolverOptions=ODESolverOptions({odeSolvOpt_str}),\n\t"
f"gradientMethod=:{options['gradientMethod']},\n\t"
f"hessianMethod=:{options['hessianMethod']},\n\t"
f"sparseJacobian={options['sparseJacobian']},\n\t"
f"ode_solver=ODESolver({odeSolvOpt_str}),\n\t"
f"gradient_method=:{options['gradient_method']},\n\t"
f"hessian_method=:{options['hessian_method']},\n\t"
f"sparse_jacobian={options['sparse_jacobian']},\n\t"
f"verbose={options['verbose']}\n)\n\nend\n"
)
# write file
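
End to end, the updated importer is used roughly as sketched below. This is a sketch only: the YAML path and start count are illustrative, from_yaml with no extra options relies on the defaults discussed above, and it assumes pypesto.optimize.minimize and the Problem bound attributes behave as in current pyPESTO:

import pypesto.optimize as optimize
from pypesto.objective.julia.petab_jl_importer import PetabJlImporter

importer = PetabJlImporter.from_yaml("conversion_reaction.yaml")  # illustrative path
problem = importer.create_problem(precompile=True)

# bounds now come from the renamed PEtab.jl fields lower_bounds/upper_bounds
print(problem.lb, problem.ub)

result = optimize.minimize(problem=problem, n_starts=10)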
