diff --git a/.gitignore b/.gitignore index bbd8a474..310bd5ea 100644 --- a/.gitignore +++ b/.gitignore @@ -29,7 +29,7 @@ tsp_test/ # Virtual environments venvp/ - +venv # Aider tool .aider* diff --git a/src/mlrose_ky/__init__.py b/src/mlrose_ky/__init__.py index ae2417bf..c08a991b 100644 --- a/src/mlrose_ky/__init__.py +++ b/src/mlrose_ky/__init__.py @@ -3,16 +3,34 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause +# noinspection PyUnresolvedReferences from .algorithms.ga import genetic_alg + +# noinspection PyUnresolvedReferences from .algorithms.sa import simulated_annealing + +# noinspection PyUnresolvedReferences from .algorithms.hc import hill_climb + +# noinspection PyUnresolvedReferences from .algorithms.rhc import random_hill_climb + +# noinspection PyUnresolvedReferences from .algorithms.gd import gradient_descent + +# noinspection PyUnresolvedReferences from .algorithms.mimic import mimic -from .algorithms.decay import GeometricDecay, ArithmeticDecay, ExponentialDecay, CustomDecay + +# noinspection PyUnresolvedReferences +from .algorithms.decay import GeomDecay, ArithDecay, ExpDecay, CustomSchedule + +# noinspection PyUnresolvedReferences from .algorithms.crossovers import OnePointCrossover, UniformCrossover, TSPCrossover -from .algorithms.mutators import SingleGeneMutator, DiscreteGeneMutator, GeneSwapMutator, SingleShiftMutator +# noinspection PyUnresolvedReferences +from .algorithms.mutators import ChangeOneMutator, DiscreteMutator, SwapMutator, ShiftOneMutator + +# noinspection PyUnresolvedReferences from .fitness import ( OneMax, FlipFlop, @@ -26,17 +44,29 @@ CustomFitness, ) +# noinspection PyUnresolvedReferences from .neural import NeuralNetwork, LinearRegression, LogisticRegression, NNClassifier, nn_core + +# noinspection PyUnresolvedReferences from .neural.activation import identity, relu, leaky_relu, sigmoid, softmax, tanh + +# noinspection PyUnresolvedReferences from .neural.fitness import NetworkWeights + +# noinspection PyUnresolvedReferences from .neural.utils.weights import flatten_weights, unflatten_weights +# noinspection PyUnresolvedReferences from .gridsearch import GridSearchMixin +# noinspection PyUnresolvedReferences from .opt_probs import DiscreteOpt, ContinuousOpt, KnapsackOpt, TSPOpt, QueensOpt, FlipFlopOpt, MaxKColorOpt +# noinspection PyUnresolvedReferences from .runners import GARunner, MIMICRunner, RHCRunner, SARunner, NNGSRunner, SKMLPRunner, build_data_filename +# noinspection PyUnresolvedReferences from .generators import MaxKColorGenerator, QueensGenerator, FlipFlopGenerator, TSPGenerator, KnapsackGenerator, ContinuousPeaksGenerator +# noinspection PyUnresolvedReferences from .samples import SyntheticDataGenerator, plot_synthetic_dataset diff --git a/src/mlrose_ky/algorithms/__init__.py b/src/mlrose_ky/algorithms/__init__.py index a044c021..0923ccb8 100644 --- a/src/mlrose_ky/algorithms/__init__.py +++ b/src/mlrose_ky/algorithms/__init__.py @@ -3,15 +3,12 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause +from .crossovers import UniformCrossover, TSPCrossover, OnePointCrossover +from .decay import ArithDecay, CustomSchedule, ExpDecay, GeomDecay from .ga import genetic_alg -from .sa import simulated_annealing -from .hc import hill_climb -from .rhc import random_hill_climb from .gd import gradient_descent +from .hc import hill_climb from .mimic import mimic - -from .crossovers import UniformCrossover, TSPCrossover, OnePointCrossover - -from .decay 
import ArithmeticDecay, CustomDecay, ExponentialDecay, GeometricDecay - -from .mutators import SingleGeneMutator, DiscreteGeneMutator, SingleShiftMutator, GeneSwapMutator +from .mutators import ChangeOneMutator, DiscreteMutator, ShiftOneMutator, SwapMutator +from .rhc import random_hill_climb +from .sa import simulated_annealing diff --git a/src/mlrose_ky/algorithms/crossovers/one_point_crossover.py b/src/mlrose_ky/algorithms/crossovers/one_point_crossover.py index 440c1735..460b0e87 100644 --- a/src/mlrose_ky/algorithms/crossovers/one_point_crossover.py +++ b/src/mlrose_ky/algorithms/crossovers/one_point_crossover.py @@ -7,9 +7,10 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Any, Sequence +import numpy as np + from mlrose_ky.algorithms.crossovers._crossover_base import _CrossoverBase diff --git a/src/mlrose_ky/algorithms/crossovers/tsp_crossover.py b/src/mlrose_ky/algorithms/crossovers/tsp_crossover.py index 159c4593..f0ab2ef8 100644 --- a/src/mlrose_ky/algorithms/crossovers/tsp_crossover.py +++ b/src/mlrose_ky/algorithms/crossovers/tsp_crossover.py @@ -8,9 +8,10 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Any, Sequence +import numpy as np + from mlrose_ky.algorithms.crossovers._crossover_base import _CrossoverBase diff --git a/src/mlrose_ky/algorithms/crossovers/uniform_crossover.py b/src/mlrose_ky/algorithms/crossovers/uniform_crossover.py index f9ee7a13..67bab0a9 100644 --- a/src/mlrose_ky/algorithms/crossovers/uniform_crossover.py +++ b/src/mlrose_ky/algorithms/crossovers/uniform_crossover.py @@ -8,9 +8,10 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Any, Sequence +import numpy as np + from mlrose_ky.algorithms.crossovers._crossover_base import _CrossoverBase diff --git a/src/mlrose_ky/algorithms/decay/__init__.py b/src/mlrose_ky/algorithms/decay/__init__.py index e2768d2b..32ab6dbe 100644 --- a/src/mlrose_ky/algorithms/decay/__init__.py +++ b/src/mlrose_ky/algorithms/decay/__init__.py @@ -3,7 +3,7 @@ # Author: Genevieve Hayes # License: BSD 3-clause -from .arithmetic_decay import ArithmeticDecay -from .custom_decay import CustomDecay -from .exponential_decay import ExponentialDecay -from .geometric_decay import GeometricDecay, GeomDecay +from .arithmetic_decay import ArithDecay +from .custom_decay import CustomSchedule +from .exponential_decay import ExpDecay +from .geometric_decay import GeomDecay diff --git a/src/mlrose_ky/algorithms/decay/arithmetic_decay.py b/src/mlrose_ky/algorithms/decay/arithmetic_decay.py index 73e3b715..33370d2c 100644 --- a/src/mlrose_ky/algorithms/decay/arithmetic_decay.py +++ b/src/mlrose_ky/algorithms/decay/arithmetic_decay.py @@ -4,7 +4,7 @@ # License: BSD 3-clause -class ArithmeticDecay: +class ArithDecay: """ Schedule for arithmetically decaying the temperature parameter T in a simulated annealing process, calculated using the formula: @@ -36,7 +36,7 @@ class ArithmeticDecay: Examples -------- - >>> schedule = ArithmeticDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) + >>> schedule = ArithDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) >>> schedule.evaluate(5) 5.25 """ @@ -55,7 +55,7 @@ def __init__(self, initial_temperature: float = 1.0, decay_rate: float = 0.0001, def __str__(self) -> str: return ( - 
f"ArithmeticDecay(initial_temperature={self.initial_temperature}, " + f"ArithDecay(initial_temperature={self.initial_temperature}, " f"decay_rate={self.decay_rate}, " f"minimum_temperature={self.minimum_temperature})" ) @@ -64,7 +64,7 @@ def __repr__(self) -> str: return self.__str__() def __eq__(self, other: object) -> bool: - if not isinstance(other, ArithmeticDecay): + if not isinstance(other, ArithDecay): return False return ( self.initial_temperature == other.initial_temperature diff --git a/src/mlrose_ky/algorithms/decay/custom_decay.py b/src/mlrose_ky/algorithms/decay/custom_decay.py index e55160f8..17a11e24 100644 --- a/src/mlrose_ky/algorithms/decay/custom_decay.py +++ b/src/mlrose_ky/algorithms/decay/custom_decay.py @@ -6,7 +6,7 @@ from typing import Callable -class CustomDecay: +class CustomSchedule: """ Class for generating a customizable temperature schedule for simulated annealing. @@ -24,7 +24,7 @@ class CustomDecay: ------- >>> def custom_decay_function(time: int, offset: int) -> float: return time + offset >>> kwargs = {'offset': 10} - >>> schedule = CustomDecay(custom_decay_function, **kwargs) + >>> schedule = CustomSchedule(custom_decay_function, **kwargs) >>> schedule.evaluate(5) 15 """ @@ -34,13 +34,13 @@ def __init__(self, decay_function: Callable[..., float], **kwargs) -> None: self.kwargs: dict = kwargs def __str__(self) -> str: - return f"CustomDecay(function={self.decay_function.__name__}, parameters={self.kwargs})" + return f"CustomSchedule(function={self.decay_function.__name__}, parameters={self.kwargs})" def __repr__(self) -> str: return self.__str__() def __eq__(self, other: object) -> bool: - if not isinstance(other, CustomDecay): + if not isinstance(other, CustomSchedule): return False return self.decay_function == other.decay_function and self.kwargs == other.kwargs diff --git a/src/mlrose_ky/algorithms/decay/exponential_decay.py b/src/mlrose_ky/algorithms/decay/exponential_decay.py index 5ebcbbad..9b7cd0eb 100644 --- a/src/mlrose_ky/algorithms/decay/exponential_decay.py +++ b/src/mlrose_ky/algorithms/decay/exponential_decay.py @@ -6,7 +6,7 @@ import numpy as np -class ExponentialDecay: +class ExpDecay: """ Defines an exponential decay schedule for the temperature parameter T in simulated annealing, using the formula: @@ -38,7 +38,7 @@ class ExponentialDecay: Examples -------- - >>> schedule = ExponentialDecay(initial_temperature=10, decay_rate=0.05, minimum_temperature=1) + >>> schedule = ExpDecay(initial_temperature=10, decay_rate=0.05, minimum_temperature=1) >>> print(schedule.evaluate(5)) 7.788007830714049 """ @@ -57,7 +57,7 @@ def __init__(self, initial_temperature: float = 1.0, decay_rate: float = 0.005, def __str__(self) -> str: return ( - f"ExponentialDecay(initial_temperature={self.initial_temperature}, " + f"ExpDecay(initial_temperature={self.initial_temperature}, " f"decay_rate={self.decay_rate}, " f"minimum_temperature={self.minimum_temperature})" ) @@ -66,7 +66,7 @@ def __repr__(self) -> str: return self.__str__() def __eq__(self, other: object) -> bool: - if not isinstance(other, ExponentialDecay): + if not isinstance(other, ExpDecay): return False return ( self.initial_temperature == other.initial_temperature diff --git a/src/mlrose_ky/algorithms/decay/geometric_decay.py b/src/mlrose_ky/algorithms/decay/geometric_decay.py index 1017f0e4..530218ab 100644 --- a/src/mlrose_ky/algorithms/decay/geometric_decay.py +++ b/src/mlrose_ky/algorithms/decay/geometric_decay.py @@ -3,10 +3,8 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle 
Nakamura) # License: BSD 3-clause -import warnings - -class GeometricDecay: +class GeomDecay: """ Defines a geometric decay schedule for the temperature parameter T in simulated annealing, using the formula: @@ -38,7 +36,7 @@ class GeometricDecay: Examples -------- - >>> schedule = GeometricDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) + >>> schedule = GeomDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) >>> print(schedule.evaluate(5)) 7.737809374999998 """ @@ -57,7 +55,7 @@ def __init__(self, initial_temperature: float = 1.0, decay_rate: float = 0.99, m def __str__(self) -> str: return ( - f"GeometricDecay(initial_temperature={self.initial_temperature}, " + f"GeomDecay(initial_temperature={self.initial_temperature}, " f"decay_rate={self.decay_rate}, " f"minimum_temperature={self.minimum_temperature})" ) @@ -66,7 +64,7 @@ def __repr__(self) -> str: return self.__str__() def __eq__(self, other: object) -> bool: - if not isinstance(other, GeometricDecay): + if not isinstance(other, GeomDecay): return False return ( self.initial_temperature == other.initial_temperature @@ -119,18 +117,3 @@ def get_info(self, time: int | None = None, prefix: str = "") -> dict: info[f"{info_prefix}current_value"] = self.evaluate(time) return info - - -# Enable backward compatibility for renamed 'GeomDecay'. -class GeomDecay(GeometricDecay): - def __new__(cls, *args, **kwargs): - """ - The class 'GeomDecay' is deprecated and will be removed in a future release. - Please use 'GeometricDecay' instead. - """ - warnings.warn( - "The class 'GeomDecay' is deprecated and will be removed in a future release. " "Please use 'GeometricDecay' instead.", - DeprecationWarning, - stacklevel=2, - ) - return super(GeomDecay, cls).__new__(cls) diff --git a/src/mlrose_ky/algorithms/gd.py b/src/mlrose_ky/algorithms/gd.py index b34b4d39..d8dcb7f5 100644 --- a/src/mlrose_ky/algorithms/gd.py +++ b/src/mlrose_ky/algorithms/gd.py @@ -3,8 +3,10 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Callable, Any + +import numpy as np + from mlrose_ky.decorators import short_name from mlrose_ky.neural.utils import flatten_weights diff --git a/src/mlrose_ky/algorithms/hc.py b/src/mlrose_ky/algorithms/hc.py index 35088100..701daeee 100644 --- a/src/mlrose_ky/algorithms/hc.py +++ b/src/mlrose_ky/algorithms/hc.py @@ -3,8 +3,10 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Callable, Any + +import numpy as np + from mlrose_ky.decorators import short_name diff --git a/src/mlrose_ky/algorithms/mimic.py b/src/mlrose_ky/algorithms/mimic.py index 70f3e79a..0ea05d0d 100644 --- a/src/mlrose_ky/algorithms/mimic.py +++ b/src/mlrose_ky/algorithms/mimic.py @@ -3,8 +3,10 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Callable, Any + +import numpy as np + from mlrose_ky.decorators import short_name diff --git a/src/mlrose_ky/algorithms/mutators/__init__.py b/src/mlrose_ky/algorithms/mutators/__init__.py index 0ec35ac8..e5ef3892 100644 --- a/src/mlrose_ky/algorithms/mutators/__init__.py +++ b/src/mlrose_ky/algorithms/mutators/__init__.py @@ -3,7 +3,7 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -from .discrete_mutator import DiscreteGeneMutator -from .gene_swap_mutator import GeneSwapMutator -from 
.single_gene_mutator import SingleGeneMutator -from .single_shift_mutator import SingleShiftMutator +from .discrete_mutator import DiscreteMutator +from .gene_swap_mutator import SwapMutator +from .single_gene_mutator import ChangeOneMutator +from .single_shift_mutator import ShiftOneMutator diff --git a/src/mlrose_ky/algorithms/mutators/_mutator_base.py b/src/mlrose_ky/algorithms/mutators/_mutator_base.py index afc46012..79223962 100644 --- a/src/mlrose_ky/algorithms/mutators/_mutator_base.py +++ b/src/mlrose_ky/algorithms/mutators/_mutator_base.py @@ -7,9 +7,10 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np -from typing import Any from abc import ABC, abstractmethod +from typing import Any + +import numpy as np class _MutatorBase(ABC): diff --git a/src/mlrose_ky/algorithms/mutators/discrete_mutator.py b/src/mlrose_ky/algorithms/mutators/discrete_mutator.py index 190df2a0..1f8b1e95 100644 --- a/src/mlrose_ky/algorithms/mutators/discrete_mutator.py +++ b/src/mlrose_ky/algorithms/mutators/discrete_mutator.py @@ -3,13 +3,14 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Any +import numpy as np + from mlrose_ky.algorithms.mutators._mutator_base import _MutatorBase -class DiscreteGeneMutator(_MutatorBase): +class DiscreteMutator(_MutatorBase): """ A mutator class that performs discrete mutation on individual genes in a genetic algorithm. diff --git a/src/mlrose_ky/algorithms/mutators/gene_swap_mutator.py b/src/mlrose_ky/algorithms/mutators/gene_swap_mutator.py index a529683b..09a7907c 100644 --- a/src/mlrose_ky/algorithms/mutators/gene_swap_mutator.py +++ b/src/mlrose_ky/algorithms/mutators/gene_swap_mutator.py @@ -3,13 +3,14 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Any +import numpy as np + from mlrose_ky.algorithms.mutators._mutator_base import _MutatorBase -class GeneSwapMutator(_MutatorBase): +class SwapMutator(_MutatorBase): """ A mutator class that implements the 'Gene Swap' mutation strategy in genetic algorithms. diff --git a/src/mlrose_ky/algorithms/mutators/single_gene_mutator.py b/src/mlrose_ky/algorithms/mutators/single_gene_mutator.py index 87a0c4df..58fe199a 100644 --- a/src/mlrose_ky/algorithms/mutators/single_gene_mutator.py +++ b/src/mlrose_ky/algorithms/mutators/single_gene_mutator.py @@ -3,13 +3,14 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Any +import numpy as np + from mlrose_ky.algorithms.mutators._mutator_base import _MutatorBase -class SingleGeneMutator(_MutatorBase): +class ChangeOneMutator(_MutatorBase): """ A mutator class that performs the 'Change One' mutation strategy in a genetic algorithm. 
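A usage sketch for the renamed mutators (the OneMax/DiscreteOpt setup is illustrative; the class names come from the hunks above, and the construction pattern — mutators take the owning problem instance — mirrors the opt_probs hunks later in this diff):

from mlrose_ky import DiscreteOpt, OneMax
from mlrose_ky.algorithms.mutators import ChangeOneMutator, SwapMutator  # formerly SingleGeneMutator, GeneSwapMutator

problem = DiscreteOpt(length=8, fitness_fn=OneMax(), maximize=True, max_val=2)

# Explicit mutator, built against the problem instance; DiscreteOpt itself now
# falls back to SwapMutator(self) when none is supplied (see the discrete_opt.py hunk below).
mutator = ChangeOneMutator(problem)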
diff --git a/src/mlrose_ky/algorithms/mutators/single_shift_mutator.py b/src/mlrose_ky/algorithms/mutators/single_shift_mutator.py index 2a9ebad6..943516d4 100644 --- a/src/mlrose_ky/algorithms/mutators/single_shift_mutator.py +++ b/src/mlrose_ky/algorithms/mutators/single_shift_mutator.py @@ -3,13 +3,14 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Any +import numpy as np + from mlrose_ky.algorithms.mutators._mutator_base import _MutatorBase -class SingleShiftMutator(_MutatorBase): +class ShiftOneMutator(_MutatorBase): """ A mutator class that implements the 'Shift One' mutation strategy in a genetic algorithm. diff --git a/src/mlrose_ky/algorithms/rhc.py b/src/mlrose_ky/algorithms/rhc.py index 7bd511a4..7faaf1c7 100644 --- a/src/mlrose_ky/algorithms/rhc.py +++ b/src/mlrose_ky/algorithms/rhc.py @@ -3,8 +3,10 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Callable, Any + +import numpy as np + from mlrose_ky.decorators import short_name diff --git a/src/mlrose_ky/algorithms/sa.py b/src/mlrose_ky/algorithms/sa.py index f332a64b..e31bbb89 100644 --- a/src/mlrose_ky/algorithms/sa.py +++ b/src/mlrose_ky/algorithms/sa.py @@ -3,16 +3,18 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Callable, Any + +import numpy as np + +from mlrose_ky.algorithms.decay import GeomDecay from mlrose_ky.decorators import short_name -from mlrose_ky.algorithms.decay import GeometricDecay @short_name("sa") def simulated_annealing( problem: Any, - schedule: Any = GeometricDecay(), + schedule: Any = GeomDecay(), max_attempts: int = 10, max_iters: int = np.inf, init_state: np.ndarray = None, @@ -29,7 +31,7 @@ def simulated_annealing( Object containing fitness function optimization problem to be solved. For example, :code:`DiscreteOpt()`, :code:`ContinuousOpt()` or :code:`TSPOpt()`. - schedule: schedule object, default: :code:`mlrose_ky.GeometricDecay()` + schedule: schedule object, default: :code:`mlrose_ky.GeomDecay()` Schedule used to determine the value of the temperature parameter. max_attempts: int, default: 10 Maximum number of attempts to find a better neighbor at each step. 
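Correspondingly, a minimal simulated-annealing sketch with the renamed schedule (the problem setup is assumed; the simulated_annealing keywords, the GeomDecay constructor names, and the evaluate(5) value are taken from the hunks above):

import mlrose_ky as mk

problem = mk.DiscreteOpt(length=8, fitness_fn=mk.OneMax(), maximize=True, max_val=2)

# sa.py now defaults to GeomDecay(); an explicit schedule can still be passed.
schedule = mk.GeomDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1)
print(schedule.evaluate(5))  # 7.737809374999998, per the GeomDecay docstring

result = mk.simulated_annealing(problem, schedule=schedule, max_attempts=10)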
diff --git a/src/mlrose_ky/fitness/__init__.py b/src/mlrose_ky/fitness/__init__.py index 4263d3f2..05761350 100644 --- a/src/mlrose_ky/fitness/__init__.py +++ b/src/mlrose_ky/fitness/__init__.py @@ -1,21 +1,12 @@ """Classes for defining fitness functions (i.e., optimization problems) for optimization algorithms.""" from .continuous_peaks import ContinuousPeaks - +from .custom_fitness import CustomFitness from .flip_flop import FlipFlop - from .four_peaks import FourPeaks - from .knapsack import Knapsack - from .max_k_color import MaxKColor - from .one_max import OneMax - from .queens import Queens - from .six_peaks import SixPeaks - from .travelling_salesperson import TravellingSalesperson - -from .custom_fitness import CustomFitness diff --git a/src/mlrose_ky/fitness/custom_fitness.py b/src/mlrose_ky/fitness/custom_fitness.py index efafca8e..b990f8a8 100644 --- a/src/mlrose_ky/fitness/custom_fitness.py +++ b/src/mlrose_ky/fitness/custom_fitness.py @@ -3,9 +3,10 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Callable, Any +import numpy as np + class CustomFitness: """Class for generating your own fitness function. diff --git a/src/mlrose_ky/fitness/travelling_salesperson.py b/src/mlrose_ky/fitness/travelling_salesperson.py index fc93406a..b55f57a4 100644 --- a/src/mlrose_ky/fitness/travelling_salesperson.py +++ b/src/mlrose_ky/fitness/travelling_salesperson.py @@ -3,9 +3,10 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np from typing import Callable +import numpy as np + class TravellingSalesperson: """ diff --git a/src/mlrose_ky/generators/flip_flop_generator.py b/src/mlrose_ky/generators/flip_flop_generator.py index d58f2d1e..6c9b674a 100644 --- a/src/mlrose_ky/generators/flip_flop_generator.py +++ b/src/mlrose_ky/generators/flip_flop_generator.py @@ -4,6 +4,7 @@ # License: BSD 3-clause import numpy as np + from mlrose_ky import FlipFlopOpt diff --git a/src/mlrose_ky/generators/knapsack_generator.py b/src/mlrose_ky/generators/knapsack_generator.py index a159f618..695f5159 100644 --- a/src/mlrose_ky/generators/knapsack_generator.py +++ b/src/mlrose_ky/generators/knapsack_generator.py @@ -4,6 +4,7 @@ # License: BSD 3-clause import numpy as np + from mlrose_ky import KnapsackOpt diff --git a/src/mlrose_ky/generators/max_k_color_generator.py b/src/mlrose_ky/generators/max_k_color_generator.py index 85361f5c..8ada51bb 100644 --- a/src/mlrose_ky/generators/max_k_color_generator.py +++ b/src/mlrose_ky/generators/max_k_color_generator.py @@ -3,8 +3,8 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np import networkx as nx +import numpy as np from mlrose_ky.opt_probs import MaxKColorOpt diff --git a/src/mlrose_ky/generators/tsp_generator.py b/src/mlrose_ky/generators/tsp_generator.py index b4f3035c..2d92d252 100644 --- a/src/mlrose_ky/generators/tsp_generator.py +++ b/src/mlrose_ky/generators/tsp_generator.py @@ -3,11 +3,12 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -import numpy as np -import networkx as nx import itertools as it from collections import defaultdict +import networkx as nx +import numpy as np + from mlrose_ky import TSPOpt diff --git a/src/mlrose_ky/gridsearch/grid_search_mixin.py b/src/mlrose_ky/gridsearch/grid_search_mixin.py index 96a993e3..bdbdfbfc 100644 --- 
a/src/mlrose_ky/gridsearch/grid_search_mixin.py +++ b/src/mlrose_ky/gridsearch/grid_search_mixin.py @@ -6,10 +6,11 @@ """ import inspect +from typing import Callable, Any + import numpy as np import sklearn.metrics as skmt import sklearn.model_selection as skms -from typing import Callable, Any class GridSearchMixin: diff --git a/src/mlrose_ky/neural/__init__.py b/src/mlrose_ky/neural/__init__.py index 7ec8c4a5..a3b1108f 100644 --- a/src/mlrose_ky/neural/__init__.py +++ b/src/mlrose_ky/neural/__init__.py @@ -4,9 +4,9 @@ # License: BSD 3-clause from .activation import identity, relu, leaky_relu, sigmoid, softmax, tanh -from .utils import flatten_weights, unflatten_weights -from .neural_network import NeuralNetwork from .fitness.network_weights import NetworkWeights -from .logistic_regression import LogisticRegression from .linear_regression import LinearRegression +from .logistic_regression import LogisticRegression +from .neural_network import NeuralNetwork from .nn_classifier import NNClassifier +from .utils import flatten_weights, unflatten_weights diff --git a/src/mlrose_ky/neural/activation/__init__.py b/src/mlrose_ky/neural/activation/__init__.py index 12fead1b..35c625eb 100644 --- a/src/mlrose_ky/neural/activation/__init__.py +++ b/src/mlrose_ky/neural/activation/__init__.py @@ -4,8 +4,8 @@ # License: BSD 3-clause from .identity import identity -from .relu import relu from .leaky_relu import leaky_relu +from .relu import relu from .sigmoid import sigmoid from .softmax import softmax from .tanh import tanh diff --git a/src/mlrose_ky/neural/activation/identity.py b/src/mlrose_ky/neural/activation/identity.py index 3fbe5408..ddba278e 100644 --- a/src/mlrose_ky/neural/activation/identity.py +++ b/src/mlrose_ky/neural/activation/identity.py @@ -1,12 +1,12 @@ """Neural network activation functions.""" -# Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) -# License: BSD 3-clause -from mlrose_ky.decorators import short_name +import warnings import numpy as np -import warnings +# Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) +# License: BSD 3-clause +from mlrose_ky.decorators import short_name warnings.filterwarnings("ignore") diff --git a/src/mlrose_ky/neural/activation/leaky_relu.py b/src/mlrose_ky/neural/activation/leaky_relu.py index 3a7d11f5..6a2bfb42 100644 --- a/src/mlrose_ky/neural/activation/leaky_relu.py +++ b/src/mlrose_ky/neural/activation/leaky_relu.py @@ -1,14 +1,14 @@ """Neural network activation functions.""" +import warnings + +import numpy as np + # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # Contributor : Ankit Grover # License: BSD 3-clause from mlrose_ky.decorators import short_name -import numpy as np - -import warnings - warnings.filterwarnings("ignore") diff --git a/src/mlrose_ky/neural/activation/relu.py b/src/mlrose_ky/neural/activation/relu.py index dbb70725..4d5bf098 100644 --- a/src/mlrose_ky/neural/activation/relu.py +++ b/src/mlrose_ky/neural/activation/relu.py @@ -3,11 +3,12 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause -from mlrose_ky.decorators import short_name - import warnings + import numpy as np +from mlrose_ky.decorators import short_name + warnings.filterwarnings("ignore") diff --git a/src/mlrose_ky/neural/activation/sigmoid.py b/src/mlrose_ky/neural/activation/sigmoid.py index d9072754..63b1bf82 100644 --- a/src/mlrose_ky/neural/activation/sigmoid.py +++ b/src/mlrose_ky/neural/activation/sigmoid.py @@ -1,12 +1,12 @@ """Neural 
network activation functions.""" -# Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) -# License: BSD 3-clause -from mlrose_ky.decorators import short_name +import warnings import numpy as np -import warnings +# Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) +# License: BSD 3-clause +from mlrose_ky.decorators import short_name warnings.filterwarnings("ignore") diff --git a/src/mlrose_ky/neural/activation/softmax.py b/src/mlrose_ky/neural/activation/softmax.py index d18cd6f8..7c8557ae 100644 --- a/src/mlrose_ky/neural/activation/softmax.py +++ b/src/mlrose_ky/neural/activation/softmax.py @@ -1,12 +1,12 @@ """Neural network activation functions.""" -# Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) -# License: BSD 3-clause -from mlrose_ky.decorators import short_name +import warnings import numpy as np -import warnings +# Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) +# License: BSD 3-clause +from mlrose_ky.decorators import short_name warnings.filterwarnings("ignore") diff --git a/src/mlrose_ky/neural/activation/tanh.py b/src/mlrose_ky/neural/activation/tanh.py index defac4a5..1079c95b 100644 --- a/src/mlrose_ky/neural/activation/tanh.py +++ b/src/mlrose_ky/neural/activation/tanh.py @@ -1,12 +1,12 @@ """Neural network activation functions.""" -# Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) -# License: BSD 3-clause -from mlrose_ky.decorators import short_name +import warnings import numpy as np -import warnings +# Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) +# License: BSD 3-clause +from mlrose_ky.decorators import short_name warnings.filterwarnings("ignore") diff --git a/src/mlrose_ky/neural/fitness/network_weights.py b/src/mlrose_ky/neural/fitness/network_weights.py index 6aa24a40..6ea08638 100644 --- a/src/mlrose_ky/neural/fitness/network_weights.py +++ b/src/mlrose_ky/neural/fitness/network_weights.py @@ -6,8 +6,8 @@ import numpy as np import sklearn.metrics as skm -from mlrose_ky.neural.utils import unflatten_weights from mlrose_ky.neural import activation as act +from mlrose_ky.neural.utils import unflatten_weights class NetworkWeights: diff --git a/src/mlrose_ky/neural/linear_regression.py b/src/mlrose_ky/neural/linear_regression.py index e5c7d843..594ef15d 100644 --- a/src/mlrose_ky/neural/linear_regression.py +++ b/src/mlrose_ky/neural/linear_regression.py @@ -5,8 +5,7 @@ from sklearn.base import RegressorMixin - -from mlrose_ky.algorithms.decay import GeometricDecay +from mlrose_ky.algorithms.decay import GeomDecay from .nn_core import _NNCore @@ -44,7 +43,7 @@ class LinearRegression(_NNCore, RegressorMixin): Number of random restarts. Only required if :code:`algorithm = 'random_hill_climb'`. - schedule: schedule object, default = mlrose_ky.GeometricDecay() + schedule: schedule object, default = mlrose_ky.GeomDecay() Schedule used to determine the value of the temperature parameter. Only required if :code:`algorithm = 'simulated_annealing'`. 
@@ -90,7 +89,7 @@ def __init__( early_stopping=False, clip_max=1e10, restarts=0, - schedule=GeometricDecay(), + schedule=GeomDecay(), pop_size=200, mutation_prob=0.1, max_attempts=10, diff --git a/src/mlrose_ky/neural/logistic_regression.py b/src/mlrose_ky/neural/logistic_regression.py index bee8162a..a0cb249a 100644 --- a/src/mlrose_ky/neural/logistic_regression.py +++ b/src/mlrose_ky/neural/logistic_regression.py @@ -6,7 +6,7 @@ from sklearn.base import ClassifierMixin -from mlrose_ky.algorithms.decay import GeometricDecay +from mlrose_ky.algorithms.decay import GeomDecay from .nn_core import _NNCore @@ -44,7 +44,7 @@ class LogisticRegression(_NNCore, ClassifierMixin): Number of random restarts. Only required if :code:`algorithm = 'random_hill_climb'`. - schedule: schedule object, default = mlrose_ky.GeometricDecay() + schedule: schedule object, default = mlrose_ky.GeomDecay() Schedule used to determine the value of the temperature parameter. Only required if :code:`algorithm = 'simulated_annealing'`. @@ -90,14 +90,13 @@ def __init__( early_stopping=False, clip_max=1e10, restarts=0, - schedule=GeometricDecay(), + schedule=GeomDecay(), pop_size=200, mutation_prob=0.1, max_attempts=10, random_state=None, curve=False, ): - _NNCore.__init__( self, hidden_nodes=[], diff --git a/src/mlrose_ky/neural/neural_network.py b/src/mlrose_ky/neural/neural_network.py index cf0a806b..bd9270f8 100644 --- a/src/mlrose_ky/neural/neural_network.py +++ b/src/mlrose_ky/neural/neural_network.py @@ -5,7 +5,7 @@ from sklearn.base import ClassifierMixin -from mlrose_ky.algorithms.decay import GeometricDecay +from mlrose_ky.algorithms.decay import GeomDecay from .nn_core import _NNCore @@ -53,7 +53,7 @@ class NeuralNetwork(_NNCore, ClassifierMixin): Number of random restarts. Only required if :code:`algorithm = 'random_hill_climb'`. - schedule: schedule object, default = mlrose_ky.GeometricDecay() + schedule: schedule object, default = mlrose_ky.GeomDecay() Schedule used to determine the value of the temperature parameter. Only required if :code:`algorithm = 'simulated_annealing'`. 
@@ -108,7 +108,7 @@ def __init__( early_stopping=False, clip_max=1e10, restarts=0, - schedule=GeometricDecay(), + schedule=GeomDecay(), pop_size=200, mutation_prob=0.1, max_attempts=10, diff --git a/src/mlrose_ky/neural/nn_core.py b/src/mlrose_ky/neural/nn_core.py index 5e501569..146dfb40 100644 --- a/src/mlrose_ky/neural/nn_core.py +++ b/src/mlrose_ky/neural/nn_core.py @@ -4,14 +4,15 @@ # License: BSD 3-clause -import numpy as np from abc import abstractmethod + +import numpy as np from sklearn.preprocessing import LabelBinarizer -from mlrose_ky.algorithms.decay import GeometricDecay + +from mlrose_ky.algorithms.decay import GeomDecay +from mlrose_ky.algorithms.ga import genetic_alg from mlrose_ky.algorithms.rhc import random_hill_climb from mlrose_ky.algorithms.sa import simulated_annealing -from mlrose_ky.algorithms.ga import genetic_alg - from mlrose_ky.neural._nn_base import _NNBase from mlrose_ky.neural.activation import identity, relu, sigmoid, tanh from mlrose_ky.neural.utils.weights import gradient_descent_original @@ -37,7 +38,7 @@ def __init__( early_stopping=False, clip_max=1e10, restarts=0, - schedule=GeometricDecay(), + schedule=GeomDecay(), pop_size=200, mutation_prob=0.1, max_attempts=10, diff --git a/src/mlrose_ky/opt_probs/discrete_opt.py b/src/mlrose_ky/opt_probs/discrete_opt.py index 90682806..95ab74df 100644 --- a/src/mlrose_ky/opt_probs/discrete_opt.py +++ b/src/mlrose_ky/opt_probs/discrete_opt.py @@ -4,12 +4,12 @@ # License: BSD 3-clause import numpy as np -from sklearn.metrics import mutual_info_score from scipy.sparse import csr_matrix from scipy.sparse.csgraph import minimum_spanning_tree, depth_first_tree +from sklearn.metrics import mutual_info_score from mlrose_ky.algorithms.crossovers import UniformCrossover -from mlrose_ky.algorithms.mutators import GeneSwapMutator +from mlrose_ky.algorithms.mutators import SwapMutator from mlrose_ky.opt_probs.opt_prob import OptProb @@ -65,7 +65,7 @@ def __init__(self, length, fitness_fn, maximize=True, max_val=2, crossover=None, self.noise = 0 self._crossover = UniformCrossover(self) if crossover is None else crossover - self._mutator = GeneSwapMutator(self) if mutator is None else mutator + self._mutator = SwapMutator(self) if mutator is None else mutator self._mut_mask = None self._mut_inf = None diff --git a/src/mlrose_ky/opt_probs/flip_flop_opt.py b/src/mlrose_ky/opt_probs/flip_flop_opt.py index 6ae0c806..1ac1dc6a 100644 --- a/src/mlrose_ky/opt_probs/flip_flop_opt.py +++ b/src/mlrose_ky/opt_probs/flip_flop_opt.py @@ -6,7 +6,7 @@ import numpy as np from mlrose_ky.algorithms.crossovers import OnePointCrossover -from mlrose_ky.algorithms.mutators import SingleGeneMutator +from mlrose_ky.algorithms.mutators import ChangeOneMutator from mlrose_ky.fitness import FlipFlop from mlrose_ky.opt_probs.discrete_opt import DiscreteOpt @@ -27,7 +27,7 @@ def __init__(self, length=None, fitness_fn=None, maximize=True, crossover=None, self.max_val = 2 crossover = OnePointCrossover(self) if crossover is None else crossover - mutator = SingleGeneMutator(self) if mutator is None else mutator + mutator = ChangeOneMutator(self) if mutator is None else mutator super().__init__(length, fitness_fn, maximize, crossover=crossover, mutator=mutator) state = np.random.randint(2, size=self.length) diff --git a/src/mlrose_ky/opt_probs/knapsack_opt.py b/src/mlrose_ky/opt_probs/knapsack_opt.py index 40523e6e..2e0389fe 100644 --- a/src/mlrose_ky/opt_probs/knapsack_opt.py +++ b/src/mlrose_ky/opt_probs/knapsack_opt.py @@ -4,7 +4,7 @@ # License: BSD 3-clause 
from mlrose_ky.algorithms.crossovers import UniformCrossover -from mlrose_ky.algorithms.mutators import SingleGeneMutator +from mlrose_ky.algorithms.mutators import ChangeOneMutator from mlrose_ky.fitness.knapsack import Knapsack from mlrose_ky.opt_probs.discrete_opt import DiscreteOpt @@ -48,5 +48,5 @@ def __init__( self.max_val = max_val crossover = UniformCrossover(self) if crossover is None else crossover - mutator = SingleGeneMutator(self) if mutator is None else mutator + mutator = ChangeOneMutator(self) if mutator is None else mutator super().__init__(length, fitness_fn, maximize, max_val, crossover, mutator) diff --git a/src/mlrose_ky/opt_probs/max_k_color_opt.py b/src/mlrose_ky/opt_probs/max_k_color_opt.py index 3c851912..e4496b73 100644 --- a/src/mlrose_ky/opt_probs/max_k_color_opt.py +++ b/src/mlrose_ky/opt_probs/max_k_color_opt.py @@ -3,15 +3,14 @@ # Authors: Genevieve Hayes (modified by Andrew Rollings, Kyle Nakamura) # License: BSD 3-clause +import networkx as nx import numpy as np from mlrose_ky.algorithms.crossovers import UniformCrossover -from mlrose_ky.algorithms.mutators import SingleGeneMutator +from mlrose_ky.algorithms.mutators import ChangeOneMutator from mlrose_ky.fitness import MaxKColor from mlrose_ky.opt_probs.discrete_opt import DiscreteOpt -import networkx as nx - class MaxKColorOpt(DiscreteOpt): def __init__( @@ -60,7 +59,7 @@ def __init__( self.max_val = max_colors crossover = UniformCrossover(self) if crossover is None else crossover - mutator = SingleGeneMutator(self) if mutator is None else mutator + mutator = ChangeOneMutator(self) if mutator is None else mutator super().__init__(length, fitness_fn, maximize, max_colors, crossover, mutator) state = np.random.randint(max_colors, size=self.length) diff --git a/src/mlrose_ky/opt_probs/queens_opt.py b/src/mlrose_ky/opt_probs/queens_opt.py index c8e9d907..09d82dd4 100644 --- a/src/mlrose_ky/opt_probs/queens_opt.py +++ b/src/mlrose_ky/opt_probs/queens_opt.py @@ -6,7 +6,7 @@ import numpy as np from mlrose_ky.algorithms.crossovers import UniformCrossover -from mlrose_ky.algorithms.mutators import SingleGeneMutator +from mlrose_ky.algorithms.mutators import ChangeOneMutator from mlrose_ky.fitness.queens import Queens from mlrose_ky.opt_probs.discrete_opt import DiscreteOpt @@ -29,7 +29,7 @@ def __init__(self, length=None, fitness_fn=None, maximize=False, crossover=None, self.max_val = length crossover = UniformCrossover(self) if crossover is None else crossover - mutator = SingleGeneMutator(self) if mutator is None else mutator + mutator = ChangeOneMutator(self) if mutator is None else mutator super().__init__(length, fitness_fn, maximize, length, crossover, mutator) state = np.random.randint(self.length, size=self.length) diff --git a/src/mlrose_ky/opt_probs/tsp_opt.py b/src/mlrose_ky/opt_probs/tsp_opt.py index 0876c510..7433f17e 100644 --- a/src/mlrose_ky/opt_probs/tsp_opt.py +++ b/src/mlrose_ky/opt_probs/tsp_opt.py @@ -6,7 +6,7 @@ import numpy as np from mlrose_ky.algorithms.crossovers import TSPCrossover -from mlrose_ky.algorithms.mutators import GeneSwapMutator +from mlrose_ky.algorithms.mutators import SwapMutator from mlrose_ky.fitness import TravellingSalesperson from mlrose_ky.opt_probs.discrete_opt import DiscreteOpt @@ -59,9 +59,7 @@ def __init__(self, length=None, fitness_fn=None, maximize=False, coords: list = elif distances is not None: length = len(set([x for (x, _, _) in distances] + [x for (_, x, _) in distances])) self.length: int = length - DiscreteOpt.__init__( - self, length, fitness_fn, 
maximize, max_val=length, crossover=TSPCrossover(self), mutator=GeneSwapMutator(self) - ) + DiscreteOpt.__init__(self, length, fitness_fn, maximize, max_val=length, crossover=TSPCrossover(self), mutator=SwapMutator(self)) if self.fitness_fn.get_problem_type() != "tsp": raise Exception("""fitness_fn must have problem type 'tsp'.""") diff --git a/src/mlrose_ky/runners/_nn_runner_base.py b/src/mlrose_ky/runners/_nn_runner_base.py index e7a184e7..20772856 100644 --- a/src/mlrose_ky/runners/_nn_runner_base.py +++ b/src/mlrose_ky/runners/_nn_runner_base.py @@ -1,15 +1,15 @@ """This module implements a base runner class for neural network experiments, including grid search functionality.""" -import os -import time import hashlib import logging +import os +import pickle as pk +import time from abc import ABC from typing import Callable import numpy as np import pandas as pd -import pickle as pk from mlrose_ky.gridsearch import GridSearchMixin from mlrose_ky.runners._runner_base import _RunnerBase diff --git a/src/mlrose_ky/runners/_runner_base.py b/src/mlrose_ky/runners/_runner_base.py index e2cb39e2..4a479d90 100644 --- a/src/mlrose_ky/runners/_runner_base.py +++ b/src/mlrose_ky/runners/_runner_base.py @@ -1,19 +1,19 @@ """Base class for managing and running optimization experiments with logging, error handling, and result saving.""" -from abc import ABC, abstractmethod -import time -import os -import logging +import ctypes +import inspect as lk import itertools as it +import logging +import multiprocessing +import os +import pickle as pk +import signal +import time +from abc import ABC, abstractmethod from typing import Any import numpy as np import pandas as pd -import pickle as pk -import inspect as lk -import signal -import multiprocessing -import ctypes from mlrose_ky.decorators import get_short_name from mlrose_ky.runners.utils import build_data_filename diff --git a/src/mlrose_ky/runners/nngs_runner.py b/src/mlrose_ky/runners/nngs_runner.py index fc562554..5020e8f9 100644 --- a/src/mlrose_ky/runners/nngs_runner.py +++ b/src/mlrose_ky/runners/nngs_runner.py @@ -1,8 +1,8 @@ import sklearn.metrics as skmt from mlrose_ky import NNClassifier -from mlrose_ky.runners._nn_runner_base import _NNRunnerBase from mlrose_ky.decorators import short_name, get_short_name +from mlrose_ky.runners._nn_runner_base import _NNRunnerBase """ Example usage: @@ -11,7 +11,7 @@ grid_search_parameters = ({ 'max_iters': [1, 2, 4, 8, 16, 32, 64, 128], # nn params 'learning_rate': [0.001, 0.002, 0.003], # nn params - 'schedule': [ArithmeticDecay(1), ArithmeticDecay(100), ArithmeticDecay(1000)] # sa params + 'schedule': [ArithDecay(1), ArithDecay(100), ArithDecay(1000)] # sa params }) nnr = NNGSRunner(x_train=x_train, diff --git a/src/mlrose_ky/runners/sa_runner.py b/src/mlrose_ky/runners/sa_runner.py index 7ffff623..6d10cf22 100644 --- a/src/mlrose_ky/runners/sa_runner.py +++ b/src/mlrose_ky/runners/sa_runner.py @@ -1,7 +1,8 @@ +import numpy as np + import mlrose_ky from mlrose_ky.decorators import short_name from mlrose_ky.runners._runner_base import _RunnerBase -import numpy as np """ Example usage: @@ -10,7 +11,7 @@ problem = TSPGenerator.generate(seed=SEED, number_of_cities=22) # note that you can also initialize a temperature_list this way - # temperature_list = [mlrose_ky.GeometricDecay(init_temp=t, decay=d) for (t, d) in [(1, 0.99), (1e2, 0.999)]] + # temperature_list = [mlrose_ky.GeomDecay(init_temp=t, decay=d) for (t, d) in [(1, 0.99), (1e2, 0.999)]] # if you use this form, the decay_list parameter is ignored. 
sa = SARunner(problem=problem, @@ -20,7 +21,7 @@ iteration_list=2 ** np.arange(14), max_attempts=5000, temperature_list=[1, 10, 50, 100, 250, 500, 1000, 2500, 5000, 10000], - decay_list=[mlrose_ky.GeometricDecay]) + decay_list=[mlrose_ky.GeomDecay]) # the two data frames will contain the results df_run_stats, df_run_curves = sa.run() @@ -55,7 +56,7 @@ def __init__( self.temperature_list = temperature_list if all([np.isscalar(x) for x in temperature_list]): if decay_list is None: - decay_list = [mlrose_ky.GeometricDecay] + decay_list = [mlrose_ky.GeomDecay] self.decay_list = decay_list self.use_raw_temp = False diff --git a/src/mlrose_ky/runners/skmlp_runner.py b/src/mlrose_ky/runners/skmlp_runner.py index d074e465..a1cf7c82 100644 --- a/src/mlrose_ky/runners/skmlp_runner.py +++ b/src/mlrose_ky/runners/skmlp_runner.py @@ -1,4 +1,5 @@ import inspect + import sklearn.metrics as skmt from sklearn.base import BaseEstimator from sklearn.neural_network import MLPClassifier diff --git a/src/mlrose_ky/samples/synthetic_data.py b/src/mlrose_ky/samples/synthetic_data.py index 18aaf7b1..29f51b57 100644 --- a/src/mlrose_ky/samples/synthetic_data.py +++ b/src/mlrose_ky/samples/synthetic_data.py @@ -1,6 +1,7 @@ """Class defining a synthetic dataset generator and a function to visualize a dataset.""" from os import makedirs + import matplotlib.colors as mpl import matplotlib.pyplot as plt import numpy as np diff --git a/tests/test_decay.py b/tests/test_decay.py index fed56570..00a01676 100644 --- a/tests/test_decay.py +++ b/tests/test_decay.py @@ -11,47 +11,47 @@ sys.path.append("..") import mlrose_ky -from mlrose_ky import GeometricDecay, ArithmeticDecay, ExponentialDecay, CustomDecay +from mlrose_ky import GeomDecay, ArithDecay, ExpDecay, CustomSchedule def test_geom_above_min(): """Test geometric decay evaluation function for case where result is above the minimum""" - schedule = GeometricDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) + schedule = GeomDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) x = schedule.evaluate(5) assert round(x, 5) == 7.73781 def test_geom_below_min(): """Test geometric decay evaluation function for case where result is below the minimum""" - schedule = GeometricDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) + schedule = GeomDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) x = schedule.evaluate(50) assert x == 1 def test_arith_above_min(): """Test arithmetic decay evaluation function for case where result is above the minimum""" - schedule = ArithmeticDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) + schedule = ArithDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) x = schedule.evaluate(5) assert x == 5.25 def test_arith_below_min(): """Test arithmetic decay evaluation function for case where result is below the minimum""" - schedule = ArithmeticDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) + schedule = ArithDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1) x = schedule.evaluate(50) assert x == 1 def test_exp_above_min(): """Test exponential decay evaluation function for case where result is above the minimum""" - schedule = ExponentialDecay(initial_temperature=10, decay_rate=0.05, minimum_temperature=1) + schedule = ExpDecay(initial_temperature=10, decay_rate=0.05, minimum_temperature=1) x = schedule.evaluate(5) assert round(x, 5) == 7.78801 def test_exp_below_min(): """Test exponential decay evaluation 
function for case where result is below the minimum""" - schedule = ExponentialDecay(initial_temperature=10, decay_rate=0.05, minimum_temperature=1) + schedule = ExpDecay(initial_temperature=10, decay_rate=0.05, minimum_temperature=1) x = schedule.evaluate(50) assert x == 1 @@ -64,6 +64,6 @@ def custom_decay_function(time: int, offset: int) -> float: return time + offset kwargs = {"offset": 10} - schedule = CustomDecay(custom_decay_function, **kwargs) + schedule = CustomSchedule(custom_decay_function, **kwargs) x = schedule.evaluate(5) assert x == 15 diff --git a/tests/test_runners/test_sa_runner.py b/tests/test_runners/test_sa_runner.py index 1ccbd1ce..182ce06a 100644 --- a/tests/test_runners/test_sa_runner.py +++ b/tests/test_runners/test_sa_runner.py @@ -34,7 +34,7 @@ def runner_kwargs(self, problem): "seed": SEED, "iteration_list": [1, 2, 3], "temperature_list": [1, 10, 50, 100], - "decay_list": [mlrose_ky.GeometricDecay], + "decay_list": [mlrose_ky.GeomDecay], "max_attempts": 500, "generate_curves": True, } @@ -52,7 +52,7 @@ def test_initialize_with_default_temperature_list(self, runner): def test_sarunner_initialization_sets_decay_list(self, runner_kwargs): """Test SARunner initialization sets the decay list.""" runner = SARunner(**runner_kwargs) - assert runner.decay_list == [mlrose_ky.GeometricDecay] + assert runner.decay_list == [mlrose_ky.GeomDecay] def test_run_with_temperature_and_decay_list(self, runner_kwargs): """Test run with temperature and decay list.""" @@ -63,16 +63,16 @@ def test_run_with_temperature_and_decay_list(self, runner_kwargs): # Ensure that temperatures were processed correctly expected_temperatures = [ - mlrose_ky.GeometricDecay(initial_temperature=1), - mlrose_ky.GeometricDecay(initial_temperature=10), - mlrose_ky.GeometricDecay(initial_temperature=50), - mlrose_ky.GeometricDecay(initial_temperature=100), + mlrose_ky.GeomDecay(initial_temperature=1), + mlrose_ky.GeomDecay(initial_temperature=10), + mlrose_ky.GeomDecay(initial_temperature=50), + mlrose_ky.GeomDecay(initial_temperature=100), ] # Check that the schedules used in the call match the expected temperatures for call, expected_schedule in zip(mock_sa.call_args_list, expected_temperatures): schedule = dict(call[1]["callback_user_info"])["schedule"] - assert isinstance(schedule, mlrose_ky.GeometricDecay) + assert isinstance(schedule, mlrose_ky.GeomDecay) assert schedule.__getattribute__("initial_temperature") == expected_schedule.__getattribute__("initial_temperature") def test_max_attempts_respected_during_initialization(self, runner_kwargs):
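Taken together, the renamed schedule API can be smoke-tested with values lifted directly from tests/test_decay.py above; only the assembly into a single snippet is new:

from mlrose_ky import GeomDecay, ArithDecay, ExpDecay, CustomSchedule

# Expected values are the ones asserted in tests/test_decay.py.
assert round(GeomDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1).evaluate(5), 5) == 7.73781
assert ArithDecay(initial_temperature=10, decay_rate=0.95, minimum_temperature=1).evaluate(5) == 5.25
assert round(ExpDecay(initial_temperature=10, decay_rate=0.05, minimum_temperature=1).evaluate(5), 5) == 7.78801
assert CustomSchedule(lambda t, offset: t + offset, offset=10).evaluate(5) == 15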