diff --git a/pypesto/objective/aesara.py b/pypesto/objective/aesara.py
index e37210239..b9e654d6b 100644
--- a/pypesto/objective/aesara.py
+++ b/pypesto/objective/aesara.py
@@ -3,7 +3,7 @@

 Adds an interface for the construction of loss functions
 incorporating aesara models. This permits computation of derivatives using a
-combination of objective based methods and aeara based backpropagation.
+combination of objective based methods and aesara based backpropagation.
 """

 import copy
@@ -43,7 +43,7 @@ class AesaraObjective(ObjectiveBase):
     objective:
         The `pypesto.ObjectiveBase` to wrap.
     aet_x:
-        Tensor variables that that define the variables of `aet_fun`
+        Tensor variables that define the variables of `aet_fun`
     aet_fun:
         Aesara function that maps `aet_x` to the variables of `objective`
     coeff:
@@ -228,7 +228,7 @@ class AesaraObjectiveOp(Op):
     Parameters
     ----------
     obj:
-        Base aseara objective
+        Base aesara objective
     coeff:
         Multiplicative coefficient for the objective function value
     """
@@ -248,7 +248,7 @@ def __init__(self, obj: AesaraObjective, coeff: Optional[float] = 1.0):

     def perform(self, node, inputs, outputs, params=None):  # noqa
         # note that we use precomputed values from the outer
-        # AesaraObjective.call_unprocessed here, which which means we can
+        # AesaraObjective.call_unprocessed here, which means we can
         # ignore inputs here
         log_prob = self._coeff * self._objective.inner_ret[FVAL]
         outputs[0][0] = np.array(log_prob)
@@ -275,7 +275,7 @@ class AesaraObjectiveGradOp(Op):
     Parameters
     ----------
     obj:
-        Base aseara objective
+        Base aesara objective
     coeff:
         Multiplicative coefficient for the objective function value
     """
@@ -321,7 +321,7 @@ class AesaraObjectiveHessOp(Op):
     Parameters
     ----------
     obj:
-        Base aseara objective
+        Base aesara objective
     coeff:
         Multiplicative coefficient for the objective function value
     """
@@ -335,7 +335,7 @@ def __init__(self, obj: AesaraObjective, coeff: Optional[float] = 1.0):

     def perform(self, node, inputs, outputs, params=None):  # noqa
         # note that we use precomputed values from the outer
-        # AesaraObjective.call_unprocessed here, which which means we can
+        # AesaraObjective.call_unprocessed here, which means we can
         # ignore inputs here
         log_prob_hess = self._coeff * self._objective.inner_ret[HESS]
         outputs[0][0] = log_prob_hess
diff --git a/pypesto/objective/aggregated.py b/pypesto/objective/aggregated.py
index febece237..9ce6c084f 100644
--- a/pypesto/objective/aggregated.py
+++ b/pypesto/objective/aggregated.py
@@ -111,7 +111,7 @@ def get_config(self) -> dict:

 def aggregate_results(rvals: Sequence[ResultDict]) -> ResultDict:
     """
-    Aggregrate the results from the provided ResultDicts into a single one.
+    Aggregate the results from the provided ResultDicts into a single one.

     Parameters
     ----------
diff --git a/pypesto/objective/base.py b/pypesto/objective/base.py
index c4a95529f..0b120eef8 100644
--- a/pypesto/objective/base.py
+++ b/pypesto/objective/base.py
@@ -255,7 +255,7 @@ def get_config(self) -> dict:
         """
         Get the configuration information of the objective function.

-        Return it as a dictonary.
+        Return it as a dictionary.
         """
         info = {'type': self.__class__.__name__}
         return info
diff --git a/pypesto/objective/finite_difference.py b/pypesto/objective/finite_difference.py
index 81f90d5b4..bb8f5c1a8 100644
--- a/pypesto/objective/finite_difference.py
+++ b/pypesto/objective/finite_difference.py
@@ -181,7 +181,7 @@ def _update(
         # shape (n_delta, n_par, ...)
         nablas = np.array(nablas)

-        # The stability vector is the the absolute difference of Jacobian
+        # The stability vector is the absolute difference of Jacobian
         # entries towards smaller and larger deltas, thus indicating the
         # change in the approximation when changing delta.
         # This is done separately for each parameter. Then, for each the delta
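For orientation, a minimal sketch of how the AesaraObjective documented in the first hunk might be used, following the parameter order in its docstring (objective, aet_x, aet_fun, coeff). The toy quadratic objective and the exp-transform are illustrative assumptions, not part of this patch.

import aesara.tensor as aet
import numpy as np

import pypesto
from pypesto.objective.aesara import AesaraObjective

# plain pypesto objective over two parameters, with analytic gradient
base_objective = pypesto.Objective(
    fun=lambda x: np.sum(x**2),
    grad=lambda x: 2 * x,
)

# aesara graph mapping the optimized variables to the objective's
# variables: here, optimize in log-space so the objective sees exp(x)
aet_x = aet.specify_shape(aet.vector('x'), (2,))
aet_fun = aet.exp(aet_x)

# wrap the base objective; gradients combine the base objective's
# derivatives with aesara backpropagation through aet_fun
aesara_objective = AesaraObjective(base_objective, aet_x, aet_fun)
fval = aesara_objective(np.zeros(2), sensi_orders=(0,))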