diff --git a/discrete_optimization/coloring/solvers/coloring_lp_solvers.py b/discrete_optimization/coloring/solvers/coloring_lp_solvers.py index 96f57de93..995835f13 100644 --- a/discrete_optimization/coloring/solvers/coloring_lp_solvers.py +++ b/discrete_optimization/coloring/solvers/coloring_lp_solvers.py @@ -11,6 +11,7 @@ import mip import networkx as nx from mip import BINARY, INTEGER, xsum +from ortools.math_opt.python import mathopt from discrete_optimization.coloring.coloring_model import ( ColoringProblem, @@ -34,6 +35,7 @@ GurobiMilpSolver, MilpSolver, MilpSolverName, + OrtoolsMathOptMilpSolver, PymipMilpSolver, ) @@ -110,6 +112,31 @@ def __init__( self.sense_optim = self.params_objective_function.sense_function self.start_solution: Optional[ColoringSolution] = None + def convert_to_variable_values( + self, solution: ColoringSolution + ) -> dict[Any, float]: + """Convert a solution to a mapping between model variables and their values. + + Will be used by set_warm_start(). + + """ + # Init all variables to 0 + hinted_variables = { + var: 0 for var in self.variable_decision["colors_var"].values() + } + + # Set var(node, color) to 1 according to the solution + for i, color in enumerate(solution.colors): + node = self.index_to_nodes_name[i] + variable_decision_key = (node, color) + hinted_variables[ + self.variable_decision["colors_var"][variable_decision_key] + ] = 1 + + hinted_variables[self.variable_decision["nb_colors"]] = solution.nb_color + + return hinted_variables + def retrieve_current_solution( self, get_var_value_for_current_solution: Callable[[Any], float], @@ -478,3 +505,162 @@ def init_model(self, **kwargs: Any) -> None: self.description_constraint["constraints_neighbors"] = { "descr": "no neighbors can have same color" } + + +class ColoringLPMathOpt(OrtoolsMathOptMilpSolver, _BaseColoringLP): + """Coloring LP solver based on pymip library. + + Note: + Gurobi and CBC are available as backend solvers. 
+ + + Attributes: + problem (ColoringProblem): coloring problem instance to solve + params_objective_function (ParamsObjectiveFunction): objective function parameters + (however this is just used for the ResultStorage creation, not in the optimisation) + + """ + + hyperparameters = _BaseColoringLP.hyperparameters + + problem: ColoringProblem + solution_hint: Optional[dict[mathopt.Variable, float]] = None + + def convert_to_variable_values( + self, solution: ColoringSolution + ) -> dict[mathopt.Variable, float]: + """Convert a solution to a mapping between model variables and their values. + + Will be used by set_warm_start() to provide a suitable SolutionHint.variable_values. + See https://or-tools.github.io/docs/pdoc/ortools/math_opt/python/model_parameters.html#SolutionHint + for more information. + + """ + return _BaseColoringLP.convert_to_variable_values(self, solution) + + def init_model(self, **kwargs: Any) -> None: + kwargs = self.complete_with_default_hyperparameters(kwargs) + greedy_start = kwargs["greedy_start"] + use_cliques = kwargs["use_cliques"] + if greedy_start: + logger.info("Computing greedy solution") + greedy_solver = GreedyColoring( + self.problem, + params_objective_function=self.params_objective_function, + ) + sol = greedy_solver.solve( + strategy=NXGreedyColoringMethod.best + ).get_best_solution() + if sol is None: + raise RuntimeError( + "greedy_solver.solve(strategy=NXGreedyColoringMethod.best).get_best_solution() " + "should not be None." + ) + if not isinstance(sol, ColoringSolution): + raise RuntimeError( + "greedy_solver.solve(strategy=NXGreedyColoringMethod.best).get_best_solution() " + "should be a ColoringSolution." 
+ ) + self.start_solution = sol + else: + logger.info("Get dummy solution") + self.start_solution = self.problem.get_dummy_solution() + nb_colors = self.start_solution.nb_color + nb_colors_subset = nb_colors + if self.problem.use_subset: + nb_colors_subset = self.problem.count_colors(self.start_solution.colors) + nb_colors = self.problem.count_colors_all_index(self.start_solution.colors) + + if nb_colors is None: + raise RuntimeError("self.start_solution.nb_color should not be None.") + color_model = mathopt.Model(name="color") + colors_var = {} + range_node = self.nodes_name + range_color = range(nb_colors) + range_color_subset = range(nb_colors_subset) + range_color_per_node = {} + for node in self.nodes_name: + rng = self.get_range_color( + node_name=node, + range_color_subset=range_color_subset, + range_color_all=range_color, + ) + for color in rng: + colors_var[node, color] = color_model.add_binary_variable( + name="x_" + str((node, color)) + ) + range_color_per_node[node] = set(rng) + one_color_constraints = {} + for n in range_node: + one_color_constraints[n] = color_model.add_linear_constraint( + mathopt.LinearSum(colors_var[n, c] for c in range_color_per_node[n]) + == 1 + ) + cliques = [] + g = self.graph.to_networkx() + if use_cliques: + for c in nx.algorithms.clique.find_cliques(g): + cliques += [c] + cliques = sorted(cliques, key=lambda x: len(x), reverse=True) + else: + cliques = [[e[0], e[1]] for e in g.edges()] + cliques_constraint: dict[Union[int, tuple[int, int]], Any] = {} + index_c = 0 + opt = color_model.add_integer_variable(lb=0, ub=nb_colors, name="nb_colors") + color_model.minimize(opt) + if use_cliques: + for c in cliques[:100]: + cliques_constraint[index_c] = color_model.add_linear_constraint( + mathopt.LinearSum( + (color_i + 1) * colors_var[node, color_i] + for node in c + for color_i in range_color_per_node[node] + ) + >= sum([i + 1 for i in range(len(c))]) + ) + cliques_constraint[(index_c, 1)] = color_model.add_linear_constraint( + 
mathopt.LinearSum( + colors_var[node, color_i] + for node in c + for color_i in range_color_per_node[node] + ) + <= opt + ) + index_c += 1 + edges = g.edges() + constraints_neighbors = {} + for e in edges: + for c in range_color_per_node[e[0]]: + if c in range_color_per_node[e[1]]: + constraints_neighbors[ + (e[0], e[1], c) + ] = color_model.add_linear_constraint( + colors_var[e[0], c] + colors_var[e[1], c] <= 1 + ) + for n in range_node: + color_model.add_linear_constraint( + mathopt.LinearSum( + (color_i + 1) * colors_var[n, color_i] + for color_i in range_color_per_node[n] + ) + <= opt + ) + self.model = color_model + self.variable_decision = {"colors_var": colors_var, "nb_colors": opt} + self.constraints_dict = { + "one_color_constraints": one_color_constraints, + "constraints_neighbors": constraints_neighbors, + } + self.description_variable_description = { + "colors_var": { + "shape": (self.number_of_nodes, nb_colors), + "type": bool, + "descr": "for each node and each color," " a binary indicator", + } + } + self.description_constraint["one_color_constraints"] = { + "descr": "one and only one color " "should be assignated to a node" + } + self.description_constraint["constraints_neighbors"] = { + "descr": "no neighbors can have same color" + } diff --git a/discrete_optimization/facility/solvers/facility_lp_solver.py b/discrete_optimization/facility/solvers/facility_lp_solver.py index 163d3d53f..8a9491f69 100644 --- a/discrete_optimization/facility/solvers/facility_lp_solver.py +++ b/discrete_optimization/facility/solvers/facility_lp_solver.py @@ -12,6 +12,7 @@ import numpy as np import numpy.typing as npt from ortools.linear_solver import pywraplp +from ortools.math_opt.python import mathopt from discrete_optimization.facility.facility_model import ( FacilityProblem, @@ -31,6 +32,7 @@ GurobiMilpSolver, MilpSolver, MilpSolverName, + OrtoolsMathOptMilpSolver, ParametersMilp, PymipMilpSolver, ) @@ -151,7 +153,9 @@ def __init__( problem=problem, 
params_objective_function=params_objective_function ) self.model = None - self.variable_decision: dict[str, dict[tuple[int, int], Union[int, Any]]] = {} + self.variable_decision: dict[ + str, dict[Union[int, tuple[int, int]], Union[int, Any]] + ] = {} self.constraints_dict: dict[str, dict[int, Any]] = {} self.description_variable_description = { "x": { @@ -164,6 +168,24 @@ def __init__( } self.description_constraint: dict[str, dict[str, str]] = {} + def convert_to_variable_values( + self, solution: FacilitySolution + ) -> dict[Any, float]: + """Convert a solution to a mapping between model variables and their values. + + Will be used by set_warm_start(). + + """ + # Init all variables to 0 + hinted_variables = {var: 0 for var in self.variable_decision["x"].values()} + # Set var(facility, customer) to 1 according to the solution + for c, f in enumerate(solution.facility_for_customers): + variable_decision_key = (f, c) + hinted_variables[self.variable_decision["x"][variable_decision_key]] = 1 + hinted_variables[self.variable_decision["y"][f]] = 1 + + return hinted_variables + def retrieve_current_solution( self, get_var_value_for_current_solution: Callable[[Any], float], @@ -299,6 +321,111 @@ def set_warm_start(self, solution: FacilitySolution) -> None: self.variable_decision["x"][variable_decision_key].Start = 1 +class LP_Facility_Solver_MathOpt(OrtoolsMathOptMilpSolver, _LPFacilitySolverBase): + """Milp solver using gurobi library + + Attributes: + coloring_model (FacilityProblem): facility problem instance to solve + params_objective_function (ParamsObjectiveFunction): objective function parameters + (however this is just used for the ResultStorage creation, not in the optimisation) + """ + + def init_model(self, **kwargs: Any) -> None: + """ + + Keyword Args: + use_matrix_indicator_heuristic (bool): use the prune search method to reduce number of variable. 
+ n_shortest (int): parameter for the prune search method + n_cheapest (int): parameter for the prune search method + + Returns: None + + """ + nb_facilities = self.problem.facility_count + nb_customers = self.problem.customer_count + kwargs = self.complete_with_default_hyperparameters(kwargs) + use_matrix_indicator_heuristic = kwargs["use_matrix_indicator_heuristic"] + if use_matrix_indicator_heuristic: + n_shortest = kwargs["n_shortest"] + n_cheapest = kwargs["n_cheapest"] + matrix_fc_indicator, matrix_length = prune_search_space( + self.problem, n_cheapest=n_cheapest, n_shortest=n_shortest + ) + else: + matrix_fc_indicator, matrix_length = prune_search_space( + self.problem, + n_cheapest=nb_facilities, + n_shortest=nb_facilities, + ) + s = mathopt.Model(name="facilities") + x: dict[tuple[int, int], Union[int, Any]] = {} + for f in range(nb_facilities): + for c in range(nb_customers): + if matrix_fc_indicator[f, c] == 0: + x[f, c] = 0 + elif matrix_fc_indicator[f, c] == 1: + x[f, c] = 1 + elif matrix_fc_indicator[f, c] == 2: + x[f, c] = s.add_binary_variable(name="x_" + str((f, c))) + facilities = self.problem.facilities + customers = self.problem.customers + used = [s.add_binary_variable(name=f"y_{i}") for i in range(nb_facilities)] + constraints_customer: dict[int, mathopt.LinearConstraint] = {} + for c in range(nb_customers): + constraints_customer[c] = s.add_linear_constraint( + mathopt.LinearSum(x[f, c] for f in range(nb_facilities)) == 1 + ) + # one facility + constraint_capacity: dict[int, mathopt.LinearConstraint] = {} + for f in range(nb_facilities): + for c in range(nb_customers): + s.add_linear_constraint(used[f] >= x[f, c]) + constraint_capacity[f] = s.add_linear_constraint( + mathopt.LinearSum( + x[f, c] * customers[c].demand for c in range(nb_customers) + ) + <= facilities[f].capacity + ) + new_obj_f = 0.0 + new_obj_f += mathopt.LinearSum( + facilities[f].setup_cost * used[f] for f in range(nb_facilities) + ) + new_obj_f += mathopt.LinearSum( + 
matrix_length[f, c] * x[f, c] + for f in range(nb_facilities) + for c in range(nb_customers) + ) + s.minimize(new_obj_f) + self.model = s + self.variable_decision = {"x": x, "y": used} + self.constraints_dict = { + "constraint_customer": constraints_customer, + "constraint_capacity": constraint_capacity, + } + self.description_variable_description = { + "x": { + "shape": (nb_facilities, nb_customers), + "type": bool, + "descr": "for each facility/customer indicate" + " if the pair is active, meaning " + "that the customer c is dealt with facility f", + } + } + logger.info("Initialized") + + def convert_to_variable_values( + self, solution: FacilitySolution + ) -> dict[mathopt.Variable, float]: + """Convert a solution to a mapping between model variables and their values. + + Will be used by set_warm_start() to provide a suitable SolutionHint.variable_values. + See https://or-tools.github.io/docs/pdoc/ortools/math_opt/python/model_parameters.html#SolutionHint + for more information. + + """ + return _LPFacilitySolverBase.convert_to_variable_values(self, solution) + + class LP_Facility_Solver_CBC(SolverFacility): """Milp formulation using cbc solver.""" diff --git a/discrete_optimization/generic_tools/lp_tools.py b/discrete_optimization/generic_tools/lp_tools.py index b046e9949..3d7a6ef9a 100644 --- a/discrete_optimization/generic_tools/lp_tools.py +++ b/discrete_optimization/generic_tools/lp_tools.py @@ -1,7 +1,8 @@ # Copyright (c) 2022 AIRBUS and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
- +import copy +import datetime import logging from abc import abstractmethod from collections.abc import Callable @@ -9,6 +10,7 @@ from typing import Any, Optional, Union import mip +from ortools.math_opt.python import mathopt from discrete_optimization.generic_tools.callbacks.callback import ( Callback, @@ -19,8 +21,11 @@ Problem, Solution, ) -from discrete_optimization.generic_tools.do_solver import SolverDO +from discrete_optimization.generic_tools.do_solver import SolverDO, WarmstartMixin from discrete_optimization.generic_tools.exceptions import SolveEarlyStop +from discrete_optimization.generic_tools.hyperparameters.hyperparameter import ( + EnumHyperparameter, +) from discrete_optimization.generic_tools.result_storage.multiobj_utils import ( TupleFitness, ) @@ -283,6 +288,261 @@ def nb_solutions(self) -> int: return self.model.num_solutions +class OrtoolsMathOptMilpSolver(MilpSolver, WarmstartMixin): + """Milp solver wrapping a solver from pymip library.""" + + hyperparameters = [ + EnumHyperparameter( + name="mathopt_solver_type", + enum=mathopt.SolverType, + ) + ] + + solution_hint: Optional[mathopt.SolutionHint] = None + + @abstractmethod + def convert_to_variable_values( + self, solution: Solution + ) -> dict[mathopt.Variable, float]: + """Convert a solution to a mapping between model variables and their values. + + Will be used by set_warm_start() to provide a suitable SolutionHint.variable_values. + See https://or-tools.github.io/docs/pdoc/ortools/math_opt/python/model_parameters.html#SolutionHint + for more information. + + Override it in subclasses to have a proper warm start. + + """ + ... + + def convert_to_dual_values( + self, solution: Solution + ) -> dict[mathopt.LinearConstraint, float]: + """Convert a solution to a mapping between model contraints and their values. + + Will be used by set_warm_start() to provide a suitable SolutionHint.dual_values. 
+ See https://or-tools.github.io/docs/pdoc/ortools/math_opt/python/model_parameters.html#SolutionHint + for more information. + Generally MIP solvers do not need this part, but LP solvers do. + + """ + return dict() + + def set_warm_start(self, solution: Solution) -> None: + self.solution_hint = mathopt.SolutionHint( + variable_values=self.convert_to_variable_values(solution), + dual_values=self.convert_to_dual_values(solution), + ) + + def get_var_value_for_ith_solution(self, var: Any, i: int) -> float: + raise NotImplementedError() + + def get_obj_value_for_ith_solution(self, i: int) -> float: + raise NotImplementedError() + + @property + def nb_solutions(self) -> int: + raise NotImplementedError() + + model: Optional[mathopt.Model] = None + termination: mathopt.Termination + early_stopping_exception: Optional[Exception] = None + + def solve( + self, + callbacks: Optional[list[Callback]] = None, + parameters_milp: Optional[ParametersMilp] = None, + mathopt_solver_type: mathopt.SolverType = mathopt.SolverType.CP_SAT, + time_limit: Optional[float] = 30.0, + mathopt_enable_output: bool = False, + mathopt_model_parameters: Optional[mathopt.ModelSolveParameters] = None, + mathopt_additional_solve_parameters: Optional[mathopt.SolveParameters] = None, + **kwargs: Any, + ) -> ResultStorage: + """Solve with OR-Tools MathOpt API + + Args: + callbacks: list of callbacks used to hook into the various stage of the solve + parameters_milp: parameters passed to the MILP solver + mathopt_solver_type: underlying solver type to use. + Passed as `solver_type` to `mathopt.solve()` + time_limit: the solve process stops after this time limit (in seconds). + If None, no time limit is applied. 
+ mathopt_enable_output: turn on mathopt logging + mathopt_model_parameters: passed to `mathopt.solve()` as `model_params` + mathopt_additional_solve_parameters: passed to `mathopt.solve()` as `params`, + except that parameters defined by above `time_limit` and `parameters_milp` + will be overriden by them. + **kwargs: passed to init_model() if model not yet existing + + Returns: + + """ + self.early_stopping_exception = None + callbacks_list = CallbackList(callbacks=callbacks) + + # callback: solve start + callbacks_list.on_solve_start(solver=self) + + # wrap user callback in a mathopt callback + mathopt_cb = MathOptCallback(do_solver=self, callback=callbacks_list) + + # optimize + self.optimize_model( + parameters_milp=parameters_milp, + time_limit=time_limit, + mathopt_solver_type=mathopt_solver_type, + mathopt_cb=mathopt_cb, + mathopt_enable_output=mathopt_enable_output, + mathopt_model_parameters=mathopt_model_parameters, + mathopt_additional_solve_parameters=mathopt_additional_solve_parameters, + **kwargs, + ) + + # raise potential exception found during callback (useful for optuna pruning, and debugging) + if self.early_stopping_exception: + if isinstance(self.early_stopping_exception, SolveEarlyStop): + logger.info(self.early_stopping_exception) + else: + raise self.early_stopping_exception + + # get result storage + res = mathopt_cb.res + + # callback: solve end + callbacks_list.on_solve_end(res=res, solver=self) + + return res + + def optimize_model( + self, + parameters_milp: Optional[ParametersMilp] = None, + mathopt_solver_type: mathopt.SolverType = mathopt.SolverType.CP_SAT, + time_limit: Optional[float] = 30.0, + mathopt_cb: Optional[mathopt.SolveCallback] = None, + mathopt_enable_output: bool = False, + mathopt_model_parameters: Optional[mathopt.ModelSolveParameters] = None, + mathopt_additional_solve_parameters: Optional[mathopt.SolveParameters] = None, + **kwargs: Any, + ) -> mathopt.SolveResult: + """ + + Args: + parameters_milp: parameters for 
the milp solver + mathopt_solver_type: underlying solver type to use. + Passed as `solver_type` to `mathopt.solve()` + time_limit: the solve process stops after this time limit (in seconds). + If None, no time limit is applied. + mathopt_cb: a mathopt callback passed to `mathopt.solve()` called at each new solution found + mathopt_enable_output: turn on mathopt logging + mathopt_model_parameters: passed to `mathopt.solve()` as `model_params` + mathopt_additional_solve_parameters: passed to `mathopt.solve()` as `params`, + except that parameters defined by above `time_limit`, `parameters_milp`, and `mathopt_enable_output` + will be overriden by them. + **kwargs: passed to init_model() if model not yet existing + + Returns: + + """ + """Optimize the mip Model. + + The solutions are yet to be retrieved via `self.retrieve_solutions()`. + + Args: + time_limit + + """ + if self.model is None: + self.init_model(**kwargs) + if self.model is None: + raise RuntimeError( + "self.model must not be None after self.init_model()." 
+ ) + + if parameters_milp is None: + parameters_milp = ParametersMilp.default() + + # solve parameters + if mathopt_additional_solve_parameters is None: + params = mathopt.SolveParameters() + else: + params = copy.deepcopy(mathopt_additional_solve_parameters) + params.time_limit = datetime.timedelta(seconds=time_limit) + params.absolute_gap_tolerance = parameters_milp.mip_gap_abs + params.relative_gap_tolerance = parameters_milp.mip_gap + params.enable_output = mathopt_enable_output + if mathopt_solver_type != mathopt.SolverType.HIGHS: + # solution_pool_size not supported for HIGHS solver + params.solution_pool_size = parameters_milp.pool_solutions + + # model specific solve parameters + if mathopt_model_parameters is None: + model_params = mathopt.ModelSolveParameters() + else: + model_params = copy.deepcopy(mathopt_model_parameters) + if self.solution_hint is not None: + # warm start: add a solution hint corresponding to the warm start + model_params.solution_hints = [ + self.solution_hint + ] + model_params.solution_hints + + callback_reg = mathopt.CallbackRegistration(events={mathopt.Event.MIP_SOLUTION}) + mathopt_res = mathopt.solve( + self.model, + solver_type=mathopt_solver_type, + params=params, + model_params=model_params, + callback_reg=callback_reg, + cb=mathopt_cb, + ) + self.termination = mathopt_res.termination + logger.info(f"Solver found {len(mathopt_res.solutions)} solutions") + if mathopt_res.termination.reason in [ + mathopt.TerminationReason.OPTIMAL, + mathopt.TerminationReason.FEASIBLE, + ]: + logger.info(f"Objective : {mathopt_res.objective_value()}") + return mathopt_res + + +def _mathopt_cb_get_obj_value_for_current_solution(): + raise RuntimeError("Cannot retrieve objective!") + + +class MathOptCallback: + def __init__(self, do_solver: OrtoolsMathOptMilpSolver, callback: Callback): + self.do_solver = do_solver + self.callback = callback + self.res = do_solver.create_result_storage() + self.nb_solutions = 0 + + def __call__(self, 
callback_data: mathopt.CallbackData) -> mathopt.CallbackResult: + cb_sol = callback_data.solution + try: + # retrieve and store new solution + sol = self.do_solver.retrieve_current_solution( + get_var_value_for_current_solution=lambda var: cb_sol[var], + get_obj_value_for_current_solution=_mathopt_cb_get_obj_value_for_current_solution, + ) + fit = self.do_solver.aggreg_from_sol(sol) + self.res.append((sol, fit)) + self.nb_solutions += 1 + # end of step callback: stopping? + stopping = self.callback.on_step_end( + step=self.nb_solutions, res=self.res, solver=self.do_solver + ) + except Exception as e: + # catch exceptions because gurobi ignore them and do not stop solving + self.do_solver.early_stopping_exception = e + stopping = True + else: + if stopping: + self.do_solver.early_stopping_exception = SolveEarlyStop( + f"{self.do_solver.__class__.__name__}.solve() stopped by user callback." + ) + return mathopt.CallbackResult(terminate=stopping) + + class GurobiMilpSolver(MilpSolver): """Milp solver wrapping a solver from gurobi library.""" diff --git a/discrete_optimization/knapsack/solvers/lp_solvers.py b/discrete_optimization/knapsack/solvers/lp_solvers.py index dd0379ff5..51220b0b9 100644 --- a/discrete_optimization/knapsack/solvers/lp_solvers.py +++ b/discrete_optimization/knapsack/solvers/lp_solvers.py @@ -10,13 +10,18 @@ from mip import BINARY, MAXIMIZE, xsum from ortools.algorithms.python import knapsack_solver from ortools.linear_solver import pywraplp +from ortools.math_opt.python import mathopt -from discrete_optimization.generic_tools.do_problem import ParamsObjectiveFunction +from discrete_optimization.generic_tools.do_problem import ( + ParamsObjectiveFunction, + Solution, +) from discrete_optimization.generic_tools.do_solver import ResultStorage, WarmstartMixin from discrete_optimization.generic_tools.lp_tools import ( GurobiMilpSolver, MilpSolver, MilpSolverName, + OrtoolsMathOptMilpSolver, PymipMilpSolver, ) from 
discrete_optimization.generic_tools.mip.pymip_tools import MyModelMilp @@ -52,11 +57,34 @@ def __init__( ) self.variable_decision: dict[str, dict[int, Union["Var", mip.Var]]] = {} self.constraints_dict: dict[ - str, Union["Constr", "QConstr", "MConstr", "GenConstr", "mip.Constr"] + str, + Union[ + "Constr", + "QConstr", + "MConstr", + "GenConstr", + "mip.Constr", + "mathopt.LinearConstraint", + ], ] = {} self.description_variable_description: dict[str, dict[str, Any]] = {} self.description_constraint: dict[str, dict[str, str]] = {} + def convert_to_variable_values( + self, solution: KnapsackSolution + ) -> dict[Any, float]: + """Convert a solution to a mapping between model variables and their values. + + Will be used by set_warm_start(). + + """ + return { + self.variable_decision["x"][variable_decision_key]: solution.list_taken[i] + for i, variable_decision_key in enumerate( + sorted(self.variable_decision["x"]) + ) + } + def retrieve_current_solution( self, get_var_value_for_current_solution: Callable[[Any], float], @@ -135,6 +163,51 @@ def set_warm_start(self, solution: KnapsackSolution) -> None: ].Start = solution.list_taken[i] +class LPKnapsackMathOpt(OrtoolsMathOptMilpSolver, _BaseLPKnapsack): + def init_model(self, **kwargs: Any) -> None: + self.model = mathopt.Model(name="Knapsack") + self.variable_decision = {"x": {}} + self.description_variable_description = { + "x": { + "shape": self.problem.nb_items, + "type": bool, + "descr": "dictionary with key the item index \ + and value the boolean value corresponding \ + to taking the item or not", + } + } + self.description_constraint["weight"] = { + "descr": "sum of weight of used items doesn't exceed max capacity" + } + weight = {} + list_item = self.problem.list_items + max_capacity = self.problem.max_capacity + x = {} + for item in list_item: + i = item.index + x[i] = self.model.add_binary_variable(name="x_" + str(i)) + weight[i] = item.weight + self.model.maximize( + mathopt.LinearSum(item.value * 
x[item.index] for item in list_item) + ) + self.variable_decision["x"] = x + self.constraints_dict["weight"] = self.model.add_linear_constraint( + mathopt.LinearSum(weight[i] * x[i] for i in x) <= max_capacity + ) + + def convert_to_variable_values( + self, solution: KnapsackSolution + ) -> dict[mathopt.Variable, float]: + """Convert a solution to a mapping between model variables and their values. + + Will be used by set_warm_start() to provide a suitable SolutionHint.variable_values. + See https://or-tools.github.io/docs/pdoc/ortools/math_opt/python/model_parameters.html#SolutionHint + for more information. + + """ + return _BaseLPKnapsack.convert_to_variable_values(self, solution) + + class LPKnapsackCBC(SolverKnapsack): def __init__( self, diff --git a/discrete_optimization/maximum_independent_set/solvers/mis_gurobi.py b/discrete_optimization/maximum_independent_set/solvers/mis_gurobi.py index 0d7347ebf..c86419be5 100644 --- a/discrete_optimization/maximum_independent_set/solvers/mis_gurobi.py +++ b/discrete_optimization/maximum_independent_set/solvers/mis_gurobi.py @@ -1,6 +1,5 @@ from __future__ import annotations -from collections.abc import Callable, Sequence from typing import Any import networkx as nx @@ -8,9 +7,7 @@ from discrete_optimization.generic_tools.do_solver import WarmstartMixin from discrete_optimization.generic_tools.graph_api import get_node_attributes -from discrete_optimization.generic_tools.result_storage.result_storage import ( - ResultStorage, -) +from discrete_optimization.maximum_independent_set.solvers.mis_lp import BaseLPMisSolver try: import gurobipy @@ -18,33 +15,14 @@ gurobi_available = False else: gurobi_available = True - from gurobipy import GRB, LinExpr, Model, Var + from gurobipy import GRB, LinExpr, Model, Var, quicksum -from discrete_optimization.generic_tools.do_problem import ParamsObjectiveFunction from discrete_optimization.generic_tools.lp_tools import GurobiMilpSolver -from 
discrete_optimization.maximum_independent_set.mis_model import ( - MisProblem, - MisSolution, -) -from discrete_optimization.maximum_independent_set.solvers.mis_solver import MisSolver +from discrete_optimization.maximum_independent_set.mis_model import MisSolution -class BaseGurobiMisSolver(MisSolver, GurobiMilpSolver, WarmstartMixin): - vars_node: Sequence[Var] - - def retrieve_current_solution( - self, - get_var_value_for_current_solution: Callable[[Any], float], - get_obj_value_for_current_solution: Callable[[], float], - ) -> MisSolution: - - chosen = [0] * self.problem.number_nodes - - for i in range(0, self.problem.number_nodes): - if get_var_value_for_current_solution(self.vars_node[i]) > 0.5: - chosen[i] = 1 - - return MisSolution(self.problem, chosen) +class BaseGurobiMisSolver(GurobiMilpSolver, BaseLPMisSolver, WarmstartMixin): + vars_node: dict[int, Var] def set_warm_start(self, solution: MisSolution) -> None: """Make the solver warm start from the given solution.""" @@ -66,8 +44,11 @@ def init_model(self, **kwargs: Any) -> None: value = get_node_attributes(self.problem.graph_nx, "value", default=1) # Set objective - obj_exp = LinExpr() - obj_exp.addTerms(value.values(), self.vars_node.select()) + obj_exp = LinExpr(0.0) + obj_exp += quicksum( + value[self.problem.index_to_nodes[i]] * self.vars_node[i] + for i in range(0, self.problem.number_nodes) + ) self.model.setObjective(obj_exp, GRB.MAXIMIZE) # for each edge it's impossible to choose the two nodes of this edges in our solution @@ -96,7 +77,7 @@ def init_model(self, **kwargs: Any) -> None: ) # Set objective - adj = nx.to_numpy_array(self.problem.graph_nx) + adj = nx.to_numpy_array(self.problem.graph_nx, nodelist=self.problem.nodes) J = np.identity(self.problem.number_nodes) A = J - adj self.model.setObjective(self.vars_node @ A @ self.vars_node, GRB.MAXIMIZE) diff --git a/discrete_optimization/maximum_independent_set/solvers/mis_lp.py b/discrete_optimization/maximum_independent_set/solvers/mis_lp.py 
new file mode 100644 index 000000000..2d71bf9ee --- /dev/null +++ b/discrete_optimization/maximum_independent_set/solvers/mis_lp.py @@ -0,0 +1,35 @@ +from collections.abc import Callable +from typing import Any + +from discrete_optimization.generic_tools.lp_tools import MilpSolver +from discrete_optimization.maximum_independent_set.mis_model import MisSolution +from discrete_optimization.maximum_independent_set.solvers.mis_solver import MisSolver + + +class BaseLPMisSolver(MisSolver, MilpSolver): + vars_node: dict[int, Any] + + def retrieve_current_solution( + self, + get_var_value_for_current_solution: Callable[[Any], float], + get_obj_value_for_current_solution: Callable[[], float], + ) -> MisSolution: + + chosen = [0] * self.problem.number_nodes + + for i in range(0, self.problem.number_nodes): + if get_var_value_for_current_solution(self.vars_node[i]) > 0.5: + chosen[i] = 1 + + return MisSolution(self.problem, chosen) + + def convert_to_variable_values(self, solution: MisSolution) -> dict[Any, float]: + """Convert a solution to a mapping between model variables and their values. + + Will be used by set_warm_start(). 
+ + """ + return { + self.vars_node[i]: solution.chosen[i] + for i in range(0, self.problem.number_nodes) + } diff --git a/discrete_optimization/maximum_independent_set/solvers/mis_mathopt.py b/discrete_optimization/maximum_independent_set/solvers/mis_mathopt.py new file mode 100644 index 000000000..c2ec8566c --- /dev/null +++ b/discrete_optimization/maximum_independent_set/solvers/mis_mathopt.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any + +import networkx as nx +import numpy as np + +from discrete_optimization.generic_tools.do_problem import Solution +from discrete_optimization.generic_tools.do_solver import WarmstartMixin +from discrete_optimization.generic_tools.graph_api import get_node_attributes +from discrete_optimization.maximum_independent_set.solvers.mis_lp import BaseLPMisSolver + +try: + import gurobipy +except ImportError: + gurobi_available = False +else: + gurobi_available = True + from gurobipy import GRB, LinExpr, Model, Var + +from ortools.math_opt.python import mathopt + +from discrete_optimization.generic_tools.lp_tools import ( + GurobiMilpSolver, + OrtoolsMathOptMilpSolver, +) +from discrete_optimization.maximum_independent_set.mis_model import MisSolution + + +class MisMathOptMilpSolver(OrtoolsMathOptMilpSolver, BaseLPMisSolver): + def init_model(self, **kwargs: Any) -> None: + + # Create a new model + self.model = mathopt.Model() + + # Create variables + self.vars_node = { + i: self.model.add_binary_variable(name=f"N{i}") + for i in range(self.problem.number_nodes) + } + + # Set objective + value = get_node_attributes(self.problem.graph_nx, "value", default=1) + obj_exp = 0.0 + obj_exp += mathopt.LinearSum( + value[self.problem.index_to_nodes[i]] * self.vars_node[i] + for i in range(0, self.problem.number_nodes) + ) + self.model.maximize(obj_exp) + + # for each edge it's impossible to choose the two nodes of this edges in our solution + + for edge in 
self.problem.graph_nx.edges(): + self.model.add_linear_constraint( + self.vars_node[self.problem.nodes_to_index[edge[0]]] + <= 1 - self.vars_node[self.problem.nodes_to_index[edge[1]]] + ) + + def convert_to_variable_values( + self, solution: MisSolution + ) -> dict[mathopt.Variable, float]: + return BaseLPMisSolver.convert_to_variable_values(self, solution) + + +class MisMathOptQuadraticSolver(OrtoolsMathOptMilpSolver, BaseLPMisSolver): + """ + The quadratic gurobi solver work only for graph without weight on nodes, + if there is weight, it's going to ignore them + """ + + def init_model(self, **kwargs: Any) -> None: + + # Create a new model + self.model = mathopt.Model() + + # Create variables + self.vars_node = [ + self.model.add_binary_variable(name=f"N{i}") + for i in range(self.problem.number_nodes) + ] + + # Set objective + adj = nx.to_numpy_array(self.problem.graph_nx) + J = np.identity(self.problem.number_nodes) + A = J - adj + self.model.maximize(self.vars_node @ A @ self.vars_node) + + def convert_to_variable_values( + self, solution: MisSolution + ) -> dict[mathopt.Variable, float]: + return BaseLPMisSolver.convert_to_variable_values(self, solution) diff --git a/tests/coloring/test_coloring.py b/tests/coloring/test_coloring.py index 9316a4c6f..7d4918b69 100644 --- a/tests/coloring/test_coloring.py +++ b/tests/coloring/test_coloring.py @@ -34,6 +34,7 @@ ColoringCPSatSolver, ModelingCPSat, ) +from discrete_optimization.coloring.solvers.coloring_lp_solvers import ColoringLPMathOpt from discrete_optimization.coloring.solvers.greedy_coloring import ( GreedyColoring, NXGreedyColoringMethod, @@ -564,5 +565,78 @@ def test_color_lp_gurobi_cb_exception(): ) +def test_color_lp_ortools_mathopt(): + file = [f for f in get_data_available() if "gc_70_1" in f][0] + color_problem = parse_file(file) + solver = ColoringLPMathOpt( + color_problem, + params_objective_function=get_default_objective_setup(color_problem), + ) + result_store = 
def test_color_lp_ortools_mathopt():
    instance_file = next(f for f in get_data_available() if "gc_70_1" in f)
    problem = parse_file(instance_file)
    solver = ColoringLPMathOpt(
        problem,
        params_objective_function=get_default_objective_setup(problem),
    )
    result_store = solver.solve(parameters_milp=ParametersMilp.default())
    best_solution = result_store.get_best_solution_fit()[0]
    assert problem.satisfy(best_solution)
    assert len(result_store) > 1

    # Without a warm start, the first solution found differs from the greedy start.
    assert result_store[0][0].colors != solver.start_solution.colors

    # With a warm start, the first reported solution is exactly the hinted one.
    warm_solver = ColoringLPMathOpt(
        problem,
        params_objective_function=get_default_objective_setup(problem),
    )
    warm_solver.init_model()
    warm_solver.set_warm_start(solver.start_solution)
    warm_results = warm_solver.solve(parameters_milp=ParametersMilp.default())
    assert warm_results[0][0].colors == solver.start_solution.colors


def test_color_lp_ortools_mathopt_cb_log():
    instance_file = next(f for f in get_data_available() if "gc_70_1" in f)
    problem = parse_file(instance_file)
    solver = ColoringLPMathOpt(
        problem,
        params_objective_function=get_default_objective_setup(problem),
    )
    tracker = NbIterationTracker()
    result_store = solver.solve(
        parameters_milp=ParametersMilp.default(), callbacks=[tracker]
    )
    solution = result_store.get_best_solution_fit()[0]
    assert len(result_store) > 1
    # The tracker must be called once per solution found.
    assert tracker.nb_iteration == len(result_store)


def test_color_lp_ortools_mathopt_stop():
    instance_file = next(f for f in get_data_available() if "gc_70_1" in f)
    problem = parse_file(instance_file)
    solver = ColoringLPMathOpt(
        problem,
        params_objective_function=get_default_objective_setup(problem),
    )
    stopper = NbIterationStopper(nb_iteration_max=1)
    result_store = solver.solve(
        parameters_milp=ParametersMilp.default(), callbacks=[stopper]
    )
    # The solve is interrupted right after the first solution.
    assert len(result_store) == 1


def test_color_lp_ortools_mathopt_cb_exception():
    instance_file = next(f for f in get_data_available() if "gc_70_1" in f)
    problem = parse_file(instance_file)
    solver = ColoringLPMathOpt(
        problem,
        params_objective_function=get_default_objective_setup(problem),
    )
    with pytest.raises(RuntimeError, match="Explicit crash"):
        solver.solve(
            parameters_milp=ParametersMilp.default(), callbacks=[MyCallbackNok()]
        )


def test_facility_lp_ortools_mathopt():
    instance_file = next(
        f for f in get_data_available() if os.path.basename(f) == "fl_3_1"
    )
    problem = parse_file(instance_file)
    solver = LP_Facility_Solver_MathOpt(problem)
    solve_kwargs = dict(
        time_limit=20,
        use_matrix_indicator_heuristic=False,
    )
    result_storage = solver.solve(**solve_kwargs)
    solution, fit = result_storage.get_best_solution_fit()
    assert problem.satisfy(solution)

    # Use a greedy solution as the warm-start hint.
    start_solution = GreedySolverFacility(problem=problem).solve().get_best_solution()

    # Without a warm start, the first solution differs from the greedy one.
    assert (
        result_storage[0][0].facility_for_customers
        != start_solution.facility_for_customers
    )

    # With a warm start, the first solution is the hinted one.
    solver = LP_Facility_Solver_MathOpt(problem)
    solver.init_model(**solve_kwargs)
    solver.set_warm_start(start_solution)
    result_storage = solver.solve(mathopt_enable_output=True, **solve_kwargs)
    assert (
        result_storage[0][0].facility_for_customers
        == start_solution.facility_for_customers
    )


def test_mathopt_warm_start():
    instance_file = next(f for f in get_data_available() if "ks_40_0" in f)
    knapsack_model: KnapsackModel = parse_file(instance_file)

    solver = LPKnapsackMathOpt(knapsack_model)
    solve_kwargs = dict(
        mathopt_enable_output=True, mathopt_solver_type=mathopt.SolverType.GSCIP
    )
    result_storage = solver.solve(**solve_kwargs)

    start_solution = GreedyBest(problem=knapsack_model).solve().get_best_solution()

    # Without a warm start, the first solution differs from the greedy one.
    assert result_storage[0][0].list_taken != start_solution.list_taken

    # With a warm start, the first solution is the hinted one.
    solver.set_warm_start(start_solution)
    result_storage = solver.solve(**solve_kwargs)
    assert result_storage[0][0].list_taken == start_solution.list_taken


def test_solver_mathopt():
    instance_file = next(f for f in get_data_available() if "1dc.64" in f)
    mis_model: MisProblem = dimacs_parser_nx(instance_file)

    solver = MisMathOptMilpSolver(mis_model)
    result_storage = solver.solve(
        mathopt_solver_type=mathopt.SolverType.CP_SAT, mathopt_enable_output=True
    )

    sol, fit = result_storage.get_best_solution_fit(satisfying=mis_model)
    assert isinstance(sol, MisSolution)


def test_solver_quad_mathopt():
    instance_file = next(f for f in get_data_available() if "1dc.64" in f)
    mis_model: MisProblem = dimacs_parser_nx(instance_file)

    solver = MisMathOptQuadraticSolver(mis_model)
    result_storage = solver.solve(
        mathopt_solver_type=mathopt.SolverType.GSCIP, mathopt_enable_output=True
    )

    sol, fit = result_storage.get_best_solution_fit(satisfying=mis_model)
    assert isinstance(sol, MisSolution)