From 0540bc265cc64c108c5428a2fd00c5b17715030b Mon Sep 17 00:00:00 2001 From: toda5603 Date: Thu, 5 Dec 2024 17:49:31 +0900 Subject: [PATCH] fix bug, add new encodings, rename methods, and improve code --- CHANGES.rst | 71 ++ docs/getting-started.rst | 74 +- docs/howto-use-pygplib.rst | 12 - examples/bmc.py | 633 ------------ examples/recon.py | 211 ---- poetry.lock | 620 ------------ pygplib/__init__.py | 2 + pygplib/absexpr.py | 41 +- pygplib/absfo.py | 72 +- pygplib/absneg.py | 33 +- pygplib/absprop.py | 139 ++- pygplib/baserelst.py | 41 +- pygplib/be.py | 102 +- pygplib/cnf.py | 245 +++-- pygplib/constraints.py | 59 ++ pygplib/ecc.py | 183 ++-- pygplib/fog.py | 267 +++-- pygplib/grst.py | 672 +++++++++---- pygplib/name.py | 20 +- pygplib/op.py | 167 +++- pygplib/prop.py | 51 +- pygplib/symrelst.py | 17 +- pyproject.toml | 6 +- tests/formula.py | 7 + tests/graph.py | 39 + examples/__init__.py => tests/name.py | 0 tests/pool.py | 83 ++ tests/solver.py | 26 + tests/test_blackbox.py | 59 ++ tests/test_bmc.py | 187 ---- tests/test_cnf.py | 48 - tests/test_constraints.py | 38 + tests/test_ecc.py | 290 +----- tests/test_fog.py | 88 +- tests/test_fog_excp.py | 2 +- tests/test_grst.py | 1331 +++++++++++++++++-------- tests/test_name.py | 2 - tests/test_prop.py | 107 +- tests/test_prop_excp.py | 23 +- tests/test_symrelst.py | 146 +-- tests/tools.py | 17 + 41 files changed, 2864 insertions(+), 3367 deletions(-) delete mode 100644 examples/bmc.py delete mode 100644 examples/recon.py delete mode 100644 poetry.lock create mode 100644 pygplib/constraints.py create mode 100644 tests/formula.py create mode 100644 tests/graph.py rename examples/__init__.py => tests/name.py (100%) create mode 100644 tests/pool.py create mode 100644 tests/solver.py create mode 100644 tests/test_blackbox.py delete mode 100644 tests/test_bmc.py delete mode 100644 tests/test_cnf.py create mode 100644 tests/test_constraints.py create mode 100644 tests/tools.py diff --git a/CHANGES.rst b/CHANGES.rst 
index 2cecccb..f1b7eb1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -3,6 +3,77 @@ CHANGES ======= +Version 2.3.0 - 2024-12-05 +-------------------------- + +Fixed +^^^^^ + +- absexpr.py: constant atoms of different classes (say, true constants of Prop and Fog) were represented as the same objects due to bug in _to_key method of absexpr.py, which are now distinguished by adding class name to key. +- ecc.py: compute_separating_ecc method did not compute separating ecc, which is now corrected and tested. +- cnf.py: all external indices from 1 to self.base are ensured to be decodable. + +Changed +^^^^^^^ + +- absexpr.py: clear method of IndexGen class is changed so as to reset initial index too. +- absprop.py: reduce_formula_step method is changed so as to reduce more by handling cases of implies and iff operators. +- prop.py: syntax of variable accepted by read method is simplified +- fog.py: _normalize_aux method (normalizing the order of values in aux fields) is changed, depending on atom type (less-than relation, introduced in this release, is not normalized, while the others are normalized same as before). +- ecc.py: simpgraph module is used to handle graph. +- cnf.py: constructor is changed to accept an iterable object. +- cnf.py: encoding to external indices and decoding to internal indices are improved and tested. +- name.py: lookup_index method is changed to fail if names of auxiliary variables introduced by get_aux_index method (see below) are given; if the leading character is "_" and the name is already registered, then this means that it is the index of an auxiliary variable. +- be.py: management of variables introduced in boolean encodings is changed; they are held in instance variables of be object and managed by be object. Such variables were previously registered to name manager with variable names separated by '@', which is now deprecated. +- grst.py: it is improved so as to be extendable and easy to add new encodings. 
+ +Added +^^^^^ + +- fog.py: atom of form x < y becomes accepted by read method +- name.py: get_aux_index method is added to be able to introduce auxiliary variables in boolean encoding as needed +- be.py: is_decodable_boolean_var method to determine if a boolean variable is decodable into an FO-variable. +- grst.py: new encoding methods (direct, log, and vertex) are implemented. +- op.py: compute_size method to compute formula size. +- constraints.py: encoding of cardinality constraint (at-most-r constraint) + +Deprecated +^^^^^^^^^^ + +- the following methods on the left of ">" have been deprecated and will be removed in version 3.0.0. + - absexpr.py: is_atom_term > is_atom + - absexpr.py: is_unop_term > is_unop + - absexpr.py: is_binop_term > is_binop + - absneg.py: is_neg_term > is_neg + - absprop.py: is_land_term > is_land + - absprop.py: is_lor_term > is_lor + - absprop.py: is_implies_term > is_implies + - absprop.py: is_iff_term > is_iff + - absfo.py: is_forall_term > is_forall + - absfo.py: is_exists_term > is_exists + - absfo.py: is_qf_term > is_qf + - prop.py: get_atom_value > (to be deleted) + - fog.py: get_atom_value > get_atom_arg + - op.py: generator > generate_subformulas + - fog.py: le > lt +  - fog.py: _LE > _LT + - fog.py: get_le_tag > get_lt_tag + - fog.py: is_le_atom > is_lt_atom + - fog.py: action_le_rel > action_lt_rel + - cnf.py: get_nvar > (to be deleted) + - cnf.py: get_ncls > (to be deleted) + - cnf.py: get_clause > (to be deleted) + - cnf.py: encode_lit > _encode_lit + - cnf.py: decode_lit > _decode_lit + - be.py: get_symbol_index > get_variable_position_pair + - be.py: get_code_pos > get_variable_position_pair + - be.py: exists_symbol > (deleted) + - be.py: exists_boolean_var > (deleted) + - grst.py: encode_eq > be_eq + - grst.py: encode_edg > be_edg + - grst.py: encode_F > be_F + - grst.py: encode_T > be_T + Version 2.2.0 - 2024-09-09 -------------------------- diff --git a/docs/getting-started.rst b/docs/getting-started.rst index 
5b44879..2d46917 100644 --- a/docs/getting-started.rst +++ b/docs/getting-started.rst @@ -11,7 +11,7 @@ Install Pygplib Quick Example ------------- -Let us count the number of independent sets of size ``3`` in the following +Let us randomly generate independent sets of size ``3`` in the following graph, ``G``, with ``pygplib``. .. code:: shell-session @@ -24,7 +24,7 @@ graph, ``G``, with ``pygplib``. # | | | # V4---V7 V6 -Import necessary modules and create a graph structure. +Let us import necessary modules and create a graph structure. .. code:: python @@ -34,7 +34,7 @@ Import necessary modules and create a graph structure. edge_list = [(1,2),(1,3),(2,4),(2,5),(3,6),(4,7),(5,7)] # edges of G st = GrSt(vertex_list, edge_list) -Parse an expression and construct a first-order formula object. +Let us parse an expression and construct a first-order formula object. See :ref:`FirstOrderLogicofGraphs` and :ref:`FormatofFirstOrderFormula` for details. @@ -43,7 +43,7 @@ details. f = Fog.read("(~ edg(x1,x2)) & (~ edg(x1,x3)) & (~ edg(x2,x3)) "\ +"& (~ x1=x2) & (~ x1=x3) & (~ x2=x3)") -Perform Boolean encoding for ``f`` and +Let us perform Boolean encoding for ``f`` and compute the domain constraint for each free variable of ``f``. The list ``[g, ] + li`` consists of propositional formulas such that ``f`` is satisfiable in ``G`` if and only if the conjunction of propositional formulas is @@ -58,34 +58,52 @@ The encodings performed in this code block are described in more details in :ref g = op.perform_boolean_encoding(f, st) li = [st.compute_domain_constraint(v) \ for v in op.get_free_vars(f)] - mgr = Cnf( [g, ] + li ) + mgr = Cnf( [g, ] + li, st=st) -Generate a CNF formula to ``f.cnf`` in `DIMACS CNF format -`__ . -.. code:: python +Let us import unigen, `UniGen approximately uniform sampler `__ , +and perform random sampling of solutions of the cnf. +(If necessary, install unigen module beforehand.) - with open("f.cnf","w") as out: - mgr.write(stream=out) +.. 
code:: python + from pysat.formula import CNF + from pysat.solvers import Solver + from pyunigen import Sampler + + num=5 # number of samples to be generated + + sampler=Sampler() + for clause in mgr.cnf: + sampler.add_clause(clause) + + cells, hashes, samples = sampler.sample(num=num, sampling_set=range(1,mgr.base+1)) + for ext_partial_assign in samples: + with Solver(\ + bootstrap_with=CNF(\ + from_clauses=\ + list(mgr.cnf) + [(lit,) for lit in ext_partial_assign])) as solver: + if solver.solve(): + ext_full_assign = solver.get_model() # external CNF vars. + int_assign = mgr.decode_assignment(ext_full_assign) # internal CNF vars. + fo_assign = struct.decode_assignment(int_assign) # first-order vars. + ans = sorted([struct.object_to_vertex(fo_assign[key]) \ + for key in fo_assign.keys()]) + print(ans) + else: + print("Unexpected error occurred during sampling!") + +Sampling solutions of combinatorial problems is computationally hard in general. +To make the above computation more efficient, pygplib provides a technique of so-called symmetry breaking. +The formula of independent set is symmetric, i.e., any permutation of +a satisfying assignment of vertices to first-order variables is also a solution of the formula, +which results in an enormous number of solutions, making it hard to perform sampling. +To overcome this, let us consider the following formula to which the constraint that all vertices assigned to variables are sorted is added instead of all-different constraint. -To count the number of solutions, -download and build a model counter `sharpSAT -`__ . -Run the following command. +.. code:: python -.. code:: shell-session + f = Fog.read("x1
The ``pygplib`` in itself does not provide any functionality of diff --git a/examples/bmc.py b/examples/bmc.py deleted file mode 100644 index bb944d7..0000000 --- a/examples/bmc.py +++ /dev/null @@ -1,633 +0,0 @@ -"""Module providing bounded model checking for graph reconfiguration.""" -import re -import sys - -from collections.abc import Callable -from functools import reduce - -from pygplib import NameMgr, Fog -from pygplib import op -from pygplib import GrSt - - -class TplCnfParam: - """Collection of parameters, used when CNF is generated. - - CNF is generated by coping "template CNF" with variables changed. - Here, template CNF is a CNF which is independent of actual state. - CNF of each state can be computed by replacing variable indices of - templace CNF with those in each state. - - NOTE: - The parameters, target_base and target_total, are not set in - the initialization time of this object, - which have to be determined each time CNF is generated. - """ - - def __init__( - self, - tpl_base: int = 0, - tpl_aux_size: int = 0, - template_cnf: tuple[tuple[int, ...], ...] = None, - target_base: int = 0, - target_total: int = 0, - is_copied: bool = True, - ): - """Initializes TplCnfParam objects.""" - self._tpl_base = tpl_base - """Index of aux vars in template cnf, starting from this number plus 1. 
- (all non-aux vars are smaller than this number)""" - self._tpl_aux_size = tpl_aux_size - """Number of aux vars (introduced during encoding) in tempalte cnf""" - self._template_cnf = template_cnf - """Template CNF computed in advance""" - self._target_base = target_base - """Aux base, used when CNF is generated with template CNF""" - self._target_total = target_total - """Total number of aux vars, used when CNF is generated""" - self._is_copied = is_copied - """Is template need to be cpied?""" - - def get_tpl_base(self) -> int: - """Gets tpl_base.""" - return self._tpl_base - - def get_tpl_aux_size(self) -> int: - """Gets tpl_aux_size.""" - return self._tpl_aux_size - - def get_template_cnf(self) -> tuple[tuple[int, ...], ...]: - """Gets template_cnf.""" - return self._template_cnf - - def get_target_base(self) -> int: - """Gets target_base.""" - return self._target_base - - def get_target_total(self) -> int: - """Gets target_total.""" - return self._target_total - - def set_tpl_base(self, tpl_base: int) -> None: - """Sets tpl_base.""" - self._tpl_base = tpl_base - - def set_tpl_aux_size(self, tpl_aux_size: int) -> None: - """Sets tpl_aux_size.""" - self._tpl_aux_size = tpl_aux_size - - def set_template_cnf(self, template_cnf: tuple[tuple[int, ...], ...]) -> None: - """Sets template_cnf.""" - self._template_cnf = template_cnf - - def set_target_base(self, target_base) -> None: - """Sets target_base.""" - self._target_base = target_base - - def set_target_total(self, target_total) -> None: - """Sets target_total.""" - self._target_total = target_total - - def is_copied(self) -> bool: - """Decides whether template CNF is copied.""" - return self._is_copied - - -class Bmc: - """Bounded model checking for graph reconfiguration problem. - - Bmc object is initialized with a collection of first-order formulas, - with which a graph reconfiguration problem instance is defined. 
- It is able to compute CNF for the bounded version of that instance - and to write it down in DIMACS CNF format. - It is also able to decode truth assignment of variables in DIMACS CNF - into that of propotional variables, which have correspondence to - first-order variables and hence can be decoded further into assignment - of first-order variables using graph structure of Fog class. - """ - - def __init__( - self, - state_expr: Fog, - trans_expr: Fog, - ini_expr: Fog, - fin_expr: Fog, - st: GrSt, - prefix: str = "nx_", - trans_type: str = "None", - ): - """Initializes BMC object. - - Args: - state_expr: first-order formula of state property - trans_expr: first-order formula of transition relation - (possibly None) - ini_expr: first-order formula of init state (or list of integers) - fin_expr: first-order formula of final state (or list of integers) - prefix: with which name of next state variable begins. - trans_type: built-in transition relation type (optional). - Set either TJ (token jumping) or TS (token sliding) - to trans_type (and set None to trans_expr). 
- """ - if st is None: - raise Exception("Set graph structure of Fog class") - if state_expr is None: - raise Exception("Set state expression") - if ini_expr is None: - raise Exception("Set initial state expression") - if fin_expr is None: - raise Exception("Set final state expression") - - - self._curr_vars = op.get_free_vars(state_expr) - """variables in current state, ordered based on names""" - self._curr_vars = type(self)._sort_vars(self._curr_vars) - self._nof_free_vars = len(self._curr_vars) - """number of free variables in current state""" - - type(self)._check_variable_names_in_expr(state_expr, \ - trans_expr, prefix) - type(self)._check_variable_names_in_ini_fin_expr(self._curr_vars, \ - ini_expr, fin_expr, st) - - self.st = st - """graph structure""" - - # Determine correspondence of variables in current state and next state - nvars = [] # variables in next state - self._varloc = {} - """variable location""" - for pos in range(self._nof_free_vars): - v = self._curr_vars[pos] - # If trans expression is not given, new name is added. 
- next_name = prefix + NameMgr.lookup_name(v) - w = NameMgr.lookup_index(next_name) - nvars.append(w) - self._varloc[v] = pos - self._varloc[w] = pos - self._next_vars = tuple(nvars) - """variables in next state""" - - # Set default transition relation if necessary - if trans_expr is None: - if trans_type == "TJ": - trans_expr = type(self)._compute_tj_template( - self._curr_vars, self._next_vars - ) - elif trans_type == "TS": - trans_expr = type(self)._compute_ts_template( - self._curr_vars, self._next_vars - ) - else: - raise Exception(f"trans_type={trans_type} is undefined") - - self._prop_ini = op.perform_boolean_encoding(ini_expr, self.st) - """propositional formula of initial state""" - self._prop_fin = op.perform_boolean_encoding(fin_expr, self.st) - """propositional formula of final state""" - self._prop_state = op.perform_boolean_encoding(state_expr, self.st) - """propositional formula of each state""" - self._prop_trans = op.perform_boolean_encoding(trans_expr, self.st) - """propositional formula of transition relations of adjacent states""" - - self._state_size = self._nof_free_vars * st.code_length - """number of cnf variables in each state""" - - # Template CNF for state property is computed in advance. - # CNF of each state will be instantiated - # by changing variable indices of this template. - expr_tup = (self._prop_state,) \ - + tuple([st.compute_domain_constraint(v) \ - for v in op.get_free_vars(state_expr)]) - base, naux, template_cnf = op.compute_cnf(expr_tup) - # NOTE: the parameters, target_base and target_total, are not yet set, - # which have to be determined each time cnf is generated. - self._state_param = TplCnfParam( - tpl_base=base, tpl_aux_size=naux, template_cnf=template_cnf, is_copied=True - ) - """parameters used when cnf of state property is generated""" - - # Template CNF for transition relation, - # used in the same way as state property. 
- expr_tup = (self._prop_trans,) - base, naux, template_cnf = op.compute_cnf(expr_tup) - # NOTE: the parameters, target_base and target_total, are not yet set, - # which have to be determined each time cnf is generated. - self._trans_param = TplCnfParam( - tpl_base=base, tpl_aux_size=naux, template_cnf=template_cnf, is_copied=True - ) - """parameters used when cnf of transition relation is generated""" - - self._total_state_size = 0 # determined in CNF generation - self._trans_aux_base = 0 # determined in CNF generation - - self.nvar = 0 - """number of variables in cnf""" - self.cnf = None - """cnf""" - - @staticmethod - def _sort_vars(tup: tuple) -> tuple: - """Sorts variable indices based on their names.""" - names = [NameMgr.lookup_name(i) for i in tup] - sorted_names = sorted(names) - res = [NameMgr.lookup_index(s) for s in sorted_names] - return tuple(res) - - @staticmethod - def _compute_tj_template(curr_vars, next_vars) -> Fog: - """Returns first-order formula of Token Jumping.""" - terms = [] - for i, v in enumerate(curr_vars): - lits = [Fog.neg(Fog.eq(v, w)) for w in next_vars] + [ - Fog.eq(curr_vars[j], w) for j, w in enumerate(next_vars) if i != j - ] - res = reduce(Fog.land, lits, Fog.true_const()) - terms.append(res) - return reduce(Fog.lor, terms, Fog.false_const()) - - @staticmethod - def _compute_ts_template(curr_vars, next_vars) -> Fog: - """Returns first-order formula of Token Sliding.""" - terms = [] - for i, v in enumerate(curr_vars): - lits = ( - [Fog.edg(v, next_vars[i])] - + [Fog.neg(Fog.eq(v, w)) for w in next_vars] - + [Fog.eq(curr_vars[j], w) for j, w in enumerate(next_vars) if i != j] - ) - res = reduce(Fog.land, lits, Fog.true_const()) - terms.append(res) - return reduce(Fog.lor, terms, Fog.false_const()) - - @staticmethod - def _generate_cnf_at_time( - result_cnf: list[tuple], - time: int, - encode_func: Callable[[int, int, TplCnfParam], int], - param: TplCnfParam, - ) -> None: - """Generates CNF of specified time.""" - for clause in 
param.get_template_cnf(): - new_clause = [] - for lit in clause: - new_lit = encode_func(lit, time, param) - new_clause.append(new_lit) - result_cnf.append(tuple(new_clause)) - - def generate_cnf( - self, bound: int, ini_expr: Fog = None, fin_expr: Fog = None - ) -> None: - """Generates CNF.""" - if bound < 0: - raise ValueError("bound must be larger than or equal to 0") - - type(self)._check_variable_names_in_ini_fin_expr( - self._curr_vars, ini_expr, fin_expr, self.st - ) - if ini_expr is not None: - self._prop_ini = op.perform_boolean_encoding(ini_expr, self.st) - - if fin_expr is not None: - self._prop_fin = op.perform_boolean_encoding(fin_expr, self.st) - - self._total_state_size = self._state_size * (bound + 1) - self.nvar = self._total_state_size - - # CNF-Encoding of state properties - self._state_param.set_target_base(self._total_state_size) - self._state_param.set_target_total( - self._state_param.get_tpl_aux_size() * (bound + 1) - ) - - fin_cnf = [] - for time in range(bound + 1): - type(self)._generate_cnf_at_time( - fin_cnf, time, self._encode_state_func, self._state_param - ) - self.nvar += self._state_param.get_target_total() - - # CNF-Encoding of transition properties - if 0 < bound: - - self._trans_param.set_target_base( - self._total_state_size + self._state_param.get_target_total() - ) - self._trans_param.set_target_total( - self._trans_param.get_tpl_aux_size() * bound - ) - - for time in range(bound): - type(self)._generate_cnf_at_time( - fin_cnf, time, self._encode_trans_func, self._trans_param - ) - self.nvar += self._trans_param.get_target_total() - else: - self._trans_param.set_target_base( - self._total_state_size + self._state_param.get_target_total() - ) - self._trans_param.set_target_total(0) - self._trans_aux_base = ( - self._total_state_size + self._state_param.get_target_total() - ) - - # CNF-Encoding of init state property (state property at time 0) - base, naux, template_cnf = op.compute_cnf((self._prop_ini,)) - _ini_param = 
TplCnfParam( - tpl_base=base, - tpl_aux_size=naux, - template_cnf=template_cnf, - target_base=self._total_state_size - + self._state_param.get_target_total() - + self._trans_param.get_target_total(), - target_total=naux, - is_copied=False, - ) - - self._generate_cnf_at_time(fin_cnf, 0, self._encode_state_func, _ini_param) - self.nvar += _ini_param.get_target_total() - - # CNF-Encoding of final state property - base, naux, template_cnf = op.compute_cnf((self._prop_fin,)) - _fin_param = TplCnfParam( - tpl_base=base, - tpl_aux_size=naux, - template_cnf=template_cnf, - target_base=self._total_state_size - + self._state_param.get_target_total() - + self._trans_param.get_target_total() - + _ini_param.get_target_total(), - target_total=naux, - is_copied=False, - ) - - self._generate_cnf_at_time(fin_cnf, bound, self._encode_state_func, _fin_param) - self.nvar += _fin_param.get_target_total() - - self.cnf = tuple(fin_cnf) - - def _encode_state_func(self, lit: int, time: int, param: TplCnfParam) -> int: - """Encodes template literal (of state property) and time - into CNF literal. 
- - Args: - lit: literal in template CNF of state property - time: time of state - param: template CNF parameters - """ - var = abs(lit) - - tpl_base = param.get_tpl_base() - tpl_aux_size = param.get_tpl_aux_size() - target_base = param.get_target_base() - target_total = param.get_target_total() - - if not 0 < var <= tpl_base + tpl_aux_size: - raise IndexError - - if var > tpl_base: # var is aux - new_var = (var - tpl_base) + target_base - if param.is_copied(): - new_var += tpl_aux_size * time - assert target_base < new_var <= target_base + target_total, ( - f"target_base={target_base}, new_var={new_var}" - + "target_base+target_total={target_base+target_total}" - ) - return new_var if lit > 0 else -new_var - - else: - symbol_index = self.st.get_symbol_index(var) - assert symbol_index in self._varloc - - new_var = ( - self._varloc[symbol_index] * self.st.code_length - + self.st.get_code_pos(var) - + 1 - ) - assert new_var <= self._state_size - - new_var += self._state_size * time - assert 0 < new_var <= self._total_state_size - - return new_var if lit > 0 else -new_var - - def _encode_trans_func(self, lit: int, time: int, param: TplCnfParam) -> int: - """Encodes template literal (of transition relation) and time - into CNF literal. 
- - Args: - lit: literal in template CNF of transition relation - time: time of state - param: template CNF parameters - """ - var = abs(lit) - - tpl_base = param.get_tpl_base() - tpl_aux_size = param.get_tpl_aux_size() - target_base = param.get_target_base() - target_total = param.get_target_total() - - if not 0 < var <= tpl_aux_size + tpl_base: - raise IndexError - - if var > tpl_base: # i is aux var - new_var = (var - tpl_base) + target_base - if param.is_copied(): - new_var += tpl_aux_size * time - assert target_base < new_var <= target_base + target_total - return new_var if lit > 0 else -new_var - - else: - symbol_index = self.st.get_symbol_index(var) - assert symbol_index in self._varloc - - if self._curr_vars[self._varloc[symbol_index]] == symbol_index: - return self._encode_state_func(lit, time, self._state_param) - else: - assert symbol_index in self._next_vars - prev_symbol_index = self._curr_vars[self._varloc[symbol_index]] - - pos = self.st.get_code_pos(var) - prev_prop_var = self.st.get_boolean_var(prev_symbol_index, pos) - - new_var = self._encode_state_func( - prev_prop_var, time + 1, self._state_param - ) - assert 0 < new_var <= self._total_state_size - - new_lit = new_var if lit > 0 else -new_var - return new_lit - - def write_cnf(self, stream=None, dimacs: bool = True) -> None: - """Writes CNF in DIMACS CNF format. 
- - Args: - stream: stdout if not specified - """ - if self.cnf is None: - raise Exception("Generate CNF before writting") - - if dimacs is False: - raise Exception - - if stream is None: - stream = sys.stdout - - out = "" - out += f"p cnf {self.nvar} {len(self.cnf)}\n" - for clause in self.cnf: - out += " ".join(map(str, clause)) + " 0\n" - - if stream is not None: - stream.write(out) - - def _decode_time(self, cnf_lit: int) -> int: - """Decodes cnf literal into time.""" - cnf_var = abs(cnf_lit) - if cnf_var > self._total_state_size: - raise ValueError("Cannot decode cnf literal of aux variable") - - time = int((cnf_var - 1) / self._state_size) - return time - - def _decode_symbol_index(self, cnf_lit: int) -> int: - """Decodes cnf literal into first-order variable.""" - cnf_var = abs(cnf_lit) - if cnf_var > self._total_state_size: - raise ValueError("Cannot decode cnf literal of aux variable") - - var_pos = int(((cnf_var - 1) % self._state_size) / self.st.code_length) - assert 0 <= var_pos < self._nof_free_vars - - return self._curr_vars[var_pos] - - def _decode_code_pos(self, cnf_lit: int) -> int: - """Decodes cnf literal into code position.""" - cnf_var = abs(cnf_lit) - if cnf_var > self._total_state_size: - raise ValueError("Cannot decode cnf literal of aux variable") - - code_pos = ((cnf_var - 1) % self._state_size) % self.st.code_length - assert 0 <= code_pos < self.st.code_length - return code_pos - - def decode(self, assign: tuple) -> tuple: - """Decodes truth assignment of DIMACS CNF variables. - - Args: - assign: tuple of nonzero integers, each of which - represents a literal of DIMACS CNF. - - Returns: - tuple of nonzero integers, each of which represents an - propositional literal of Prop class. 
- """ - if self.cnf is None: - raise Exception("CNF is not yet computed") - - res = {} - - for lit in assign: - if type(lit) != int: - raise Exception("Non integer found") - if lit == 0: - raise Exception("0 not allowed") - if abs(lit) > self._total_state_size: - continue - time = self._decode_time(lit) - symbol_index = self._decode_symbol_index(lit) - pos = self._decode_code_pos(lit) - new_var = self.st.get_boolean_var(symbol_index, pos) - if time not in res: - res[time] = [] - res[time].append(new_var if lit > 0 else -new_var) - - return tuple([tuple(res[time]) for time in sorted(res.keys())]) - - # Check variable names in state and trans expression - @staticmethod - def _check_variable_names_in_expr(state_expr: Fog, trans_expr: Fog, prefix: str): - """Checks variable names.""" - state_vars = op.get_free_vars(state_expr) - - # None of free variable names in state_expr start with reserved prefix. - for i in state_vars: - name = NameMgr.lookup_name(i) - if re.match(f"^{prefix}", name): - raise Exception( - f"Variable name {name} in state expression" - + " must not start with {prefix}" - ) - - if trans_expr is not None: - state_var_names = [NameMgr.lookup_name(i) for i in state_vars] - - trans_vars = op.get_free_vars(trans_expr) - - for i in trans_vars: - if i in state_vars: - continue - - name = NameMgr.lookup_name(i) - remain = re.sub(f"^{prefix}", "", name) - if remain not in state_var_names: - raise Exception( - f"Variable {name} must not appear" + " in trans expression" - ) - - # Check variable names in init state and final state expressions - @staticmethod - def _check_variable_names_in_ini_fin_expr( - state_vars: tuple[int], ini_expr: Fog, fin_expr: Fog, st: GrSt - ): - """Checks variable names.""" - state_var_names = [NameMgr.lookup_name(i) for i in state_vars] - - # Init State Expression - if ini_expr is not None: - ini_vars = op.get_free_vars_and_consts(ini_expr) - # includes not only variables but also constants - ini_var_names = [ - 
NameMgr.lookup_name(i) for i in ini_vars if NameMgr.is_variable(i) - ] - ini_con_names = [ - NameMgr.lookup_name(i) for i in ini_vars if NameMgr.is_constant(i) - ] - assert len(ini_vars) == len(ini_var_names) + len(ini_con_names) - - for name in ini_var_names: - if name not in state_var_names: - raise Exception( - f"Variable {name} must not appear " + "in init state expression" - ) - - for name in ini_con_names: - if NameMgr.lookup_index(name) not in st.domain: - raise Exception( - f"Cannot intepret constant {name} " + "as domain element" - ) - - # Final State Expression - if fin_expr is not None: - fin_vars = op.get_free_vars_and_consts(fin_expr) - # includes not only variables but also constants - fin_var_names = [ - NameMgr.lookup_name(i) for i in fin_vars if NameMgr.is_variable(i) - ] - fin_con_names = [ - NameMgr.lookup_name(i) for i in fin_vars if NameMgr.is_constant(i) - ] - assert len(fin_vars) == len(fin_var_names) + len(fin_con_names) - - for name in fin_var_names: - if name not in state_var_names: - raise Exception( - f"Variable {name} must not appear " - + "in final state expression" - ) - - for name in fin_con_names: - if NameMgr.lookup_index(name) not in st.domain: - raise Exception( - f"Cannot intepret constant {name} " + "as domain element" - ) diff --git a/examples/recon.py b/examples/recon.py deleted file mode 100644 index ac660b3..0000000 --- a/examples/recon.py +++ /dev/null @@ -1,211 +0,0 @@ -"""Reconfiguration problem solver for first-order properties of graph vertices""" -import time - -from argparse import ArgumentParser -from pysat.formula import CNF -from pysat.solvers import Solver - -from pygplib import NameMgr, Fog, GrSt -from pygplib import op -from bmc import Bmc - - -def get_option(): - """Gets command-line options.""" - ap = ArgumentParser() - ap.add_argument("-b", "--bound", type=int, default=10, help="Specify maximum bound") - ap.add_argument( - "-t", - "--trans", - type=str, - default="TJ", - choices=["TS", "TJ"], - help="Specify 
transition relation", - ) - ap.add_argument( - "-e", - "--encoding", - type=str, - default="edge", - choices=["edge", "clique", "direct", "log"], - help="Specify ENCODING type", - ) - ap.add_argument("arg1", help="dimacs graph file") - ap.add_argument("arg2", help="formula file") - - return ap.parse_args() - -def parse_graph_file(filename: str) -> list[tuple[int, ...], ...]: - """Computes a graph file in DIMACS format. - - Returns: - list of pairs of nonzero positive integers with each pair representing - an edge. - """ - data = "" - with open(filename, "r", encoding="utf-8") as f: - data = f.read() - - res = [] - for line in data.split("\n"): - if len(line) == 0: - continue - if line[0] == "e": - v = int(line.split(" ")[1]) - w = int(line.split(" ")[2]) - if v == w: - raise Exception(f"loop ({v},{w}) found") - if w < v: - tmp = v - v = w - w = tmp - assert v < w - res.append((v, w)) - return res - -def parse_formula_file(file_name: str) -> tuple[str, str, str, str]: - """Parses formula-file.""" - with open(file_name, "r", encoding="utf-8") as f: - data = f.read() - - state_str = "" - trans_str = "" - ini_str = "" - fin_str = "" - for line in data.split("\n"): - if len(line) == 0: - continue - if line[0] == "s": - state_str = line[1:] - if line[0] == "t": - trans_str = line[1:] - if line[0] == "i": - ini_str = line[1:] - if line[0] == "f": - fin_str = line[1:] - return (state_str, trans_str, ini_str, fin_str) - - -def convert_ini_str(formula_str: str, variable_name: list) -> str: - """Converts initial state formula string if it is a sequence of integers. 
- - Input (formula_str) - 3 4 5 - - Input (variable_name) - ["x01", "x02", "x03", ...,"x10"] - - Output (equality as sequence) - x01=V3 & x02=V4 & x03=V5 - """ - for x in formula_str.split(" "): - if x == "": - continue - if f"{int(x)}" != x: - raise Exception() - vertex_name = ["V" + f"{x}" for x in formula_str.split(" ") if x != ""] - if len(vertex_name) > len(variable_name): - raise Exception("number of integers in init state is unmatching") - - return " & ".join([n + "=" + m for n, m in zip(variable_name, vertex_name)]) - - -def convert_fin_str(formula_str: str, variable_name: list) -> str: - """Converts final state formula string if it is a sequence of integers. - - Input (formula_str) - 2 7 6 - - Input (variable_name) - ["x01", "x02", "x03", ...,"x10"] - - Output (equality as set) - (x01=V2 | x01=V7 | x01=V6) & (x02=V2 | x02=V7 | x02=V6) & \ - (x03=V2 | x03=V7 | x03=V6) & (x01=V2 | x02=V2 | x03=V2) & \ - (x01=V7 | x02=V7 | x03=V7) & (x01=V6 | x02=V6 | x03=V6) - """ - for x in formula_str.split(" "): - if x == "": - continue - if f"{int(x)}" != x: - raise Exception() - vertex_name = ["V" + f"{x}" for x in formula_str.split(" ") if x != ""] - if len(vertex_name) > len(variable_name): - raise Exception("number of integers in fin state is unmatching") - - tmp = [] - # subset relation - for i in range(len(vertex_name)): - tmp.append("(" + " | ".join([variable_name[i] + "=" + n for n in vertex_name]) + ")") - # supset relation - for n in vertex_name: - tmp.append("(" + " | ".join([n + "=" + variable_name[i] for i in range(len(vertex_name))]) + ")") - return " & ".join(tmp) - - -start_time = time.time() - -args = get_option() -graph_file = args.arg1 -formula_file = args.arg2 -max_bound = args.bound -trans_type = args.trans -encoding_type = args.encoding - -edge_list = parse_graph_file(graph_file) -vertex_list = [] -for e in edge_list: - vertex_list.append(e[0]) - vertex_list.append(e[1]) -vertex_list = list(set(vertex_list)) - -phi = parse_formula_file(formula_file) 
-state_expr = Fog.read(phi[0]) -trans_expr = Fog.read(phi[1]) if phi[1] != "" else None -name_list = [NameMgr.lookup_name(v) for v in op.get_free_vars(state_expr)] -ini_expr = Fog.read(convert_ini_str(phi[2], name_list)) -fin_expr = Fog.read(convert_fin_str(phi[3], name_list)) -trans_type = "None" if phi[1] != "" else trans_type - -st = GrSt(vertex_list, edge_list, encoding=encoding_type) -bmc = Bmc(state_expr, trans_expr, ini_expr, fin_expr, st, trans_type=trans_type) - -solve_time = 0 -compile_time = 0 -solved = False -for step in range(0, max_bound): - start_compile = time.time() - bmc.generate_cnf(step) - end_compile = time.time() - compile_time += end_compile - start_compile - - cnf = CNF(from_clauses=bmc.cnf) - with Solver(bootstrap_with=cnf) as solver: - start_solve = time.time() - solved = solver.solve() - end_solve = time.time() - solve_time += end_solve - start_solve - - if solved: - model = solver.get_model() - else: - continue - - print("c SATISFIABLE") - ans = [] - for assign in bmc.decode(model): - res = st.decode_assignment(assign) - state = [st.object_to_vertex(res[key]) for key in res.keys()] - ans.append(state) - print("a " + " ".join(map(str, state))) - break - -if not solved: - print("c UNSATISFIABLE") - -end_time = time.time() -whole_time = end_time - start_time - -print(f"c compile_time {compile_time}") -print(f"c solve_time {solve_time}") -print(f"c whole_time {whole_time}") diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index 41fe6c1..0000000 --- a/poetry.lock +++ /dev/null @@ -1,620 +0,0 @@ -[[package]] -name = "alabaster" -version = "0.7.13" -description = "A configurable sidebar-enabled Sphinx theme" -category = "main" -optional = true -python-versions = ">=3.6" - -[[package]] -name = "argparse" -version = "1.4.0" -description = "Python command-line parsing library" -category = "main" -optional = true -python-versions = "*" - -[[package]] -name = "babel" -version = "2.13.1" -description = "Internationalization utilities" 
-category = "main" -optional = true -python-versions = ">=3.7" - -[package.extras] -dev = ["pytest (>=6.0)", "pytest-cov", "freezegun (>=1.0,<2.0)"] - -[[package]] -name = "cachetools" -version = "5.3.2" -description = "Extensible memoizing collections and decorators" -category = "main" -optional = true -python-versions = ">=3.7" - -[[package]] -name = "certifi" -version = "2023.7.22" -description = "Python package for providing Mozilla's CA Bundle." -category = "main" -optional = true -python-versions = ">=3.6" - -[[package]] -name = "chardet" -version = "5.2.0" -description = "Universal encoding detector for Python 3" -category = "main" -optional = true -python-versions = ">=3.7" - -[[package]] -name = "charset-normalizer" -version = "3.3.2" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" -optional = true -python-versions = ">=3.7.0" - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-category = "main" -optional = true -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" - -[[package]] -name = "coverage" -version = "7.3.2" -description = "Code coverage measurement for Python" -category = "main" -optional = true -python-versions = ">=3.8" - -[package.dependencies] -tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} - -[package.extras] -toml = ["tomli"] - -[[package]] -name = "distlib" -version = "0.3.7" -description = "Distribution utilities" -category = "main" -optional = true -python-versions = "*" - -[[package]] -name = "docutils" -version = "0.20.1" -description = "Docutils -- Python Documentation Utilities" -category = "main" -optional = true -python-versions = ">=3.7" - -[[package]] -name = "exceptiongroup" -version = "1.1.3" -description = "Backport of PEP 654 (exception groups)" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "filelock" -version = "3.13.1" -description = "A platform independent file lock." 
-category = "main" -optional = true -python-versions = ">=3.8" - -[package.extras] -docs = ["furo (>=2023.9.10)", "sphinx-autodoc-typehints (>=1.24)", "sphinx (>=7.2.6)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "pytest (>=7.4.3)"] -typing = ["typing-extensions (>=4.8)"] - -[[package]] -name = "idna" -version = "3.4" -description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" -optional = true -python-versions = ">=3.5" - -[[package]] -name = "imagesize" -version = "1.4.1" -description = "Getting image size from png/jpeg/jpeg2000/gif file" -category = "main" -optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[[package]] -name = "importlib-metadata" -version = "6.8.0" -description = "Read metadata from Python packages" -category = "main" -optional = true -python-versions = ">=3.8" - -[package.dependencies] -zipp = ">=0.5" - -[package.extras] -docs = ["sphinx (>=3.5)", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "furo", "sphinx-lint", "jaraco.tidelift (>=1.4)"] -perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ruff", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -category = "main" -optional = true -python-versions = ">=3.7" - -[[package]] -name = "jinja2" -version = "3.1.2" -description = "A very fast and expressive template engine." -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -MarkupSafe = ">=2.0" - -[package.extras] -i18n = ["Babel (>=2.7)"] - -[[package]] -name = "markupsafe" -version = "2.1.3" -description = "Safely add untrusted strings to HTML/XML markup." 
-category = "main" -optional = true -python-versions = ">=3.7" - -[[package]] -name = "packaging" -version = "23.2" -description = "Core utilities for Python packages" -category = "main" -optional = true -python-versions = ">=3.7" - -[[package]] -name = "pallets-sphinx-themes" -version = "2.1.1" -description = "Sphinx themes for Pallets and related projects." -category = "main" -optional = true -python-versions = ">=3.8" - -[package.dependencies] -packaging = "*" -Sphinx = ">=3" - -[[package]] -name = "platformdirs" -version = "3.11.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "main" -optional = true -python-versions = ">=3.7" - -[package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx-autodoc-typehints (>=1.24)", "sphinx (>=7.1.1)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest (>=7.4)"] - -[[package]] -name = "pluggy" -version = "1.3.0" -description = "plugin and hook calling mechanisms for python" -category = "main" -optional = true -python-versions = ">=3.8" - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pygments" -version = "2.16.1" -description = "Pygments is a syntax highlighting package written in Python." 
-category = "main" -optional = true -python-versions = ">=3.7" - -[package.extras] -plugins = ["importlib-metadata"] - -[[package]] -name = "pyparsing" -version = "3.1.1" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" -category = "main" -optional = false -python-versions = ">=3.6.8" - -[package.extras] -diagrams = ["railroad-diagrams", "jinja2"] - -[[package]] -name = "pyproject-api" -version = "1.6.1" -description = "API to interact with the python pyproject.toml based projects" -category = "main" -optional = true -python-versions = ">=3.8" - -[package.dependencies] -packaging = ">=23.1" -tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} - -[package.extras] -docs = ["furo (>=2023.8.19)", "sphinx-autodoc-typehints (>=1.24)", "sphinx (<7.2)"] -testing = ["covdefaults (>=2.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest (>=7.4)", "setuptools (>=68.1.2)", "wheel (>=0.41.2)"] - -[[package]] -name = "pytest" -version = "7.4.3" -description = "pytest: simple powerful testing with Python" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} - -[package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-cov" -version = "4.1.0" -description = "Pytest plugin for measuring coverage." 
-category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -coverage = {version = ">=5.2.1", extras = ["toml"]} -pytest = ">=4.6" - -[package.extras] -testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtualenv"] - -[[package]] -name = "python-sat" -version = "0.1.8.dev10" -description = "A Python library for prototyping with SAT oracles" -category = "main" -optional = true -python-versions = "*" - -[package.dependencies] -six = "*" - -[package.extras] -aiger = ["py-aiger-cnf (>=2.0.0)"] -approxmc = ["pyapproxmc (>=4.1.8)"] -pblib = ["pypblib (>=0.0.3)"] - -[[package]] -name = "requests" -version = "2.31.0" -description = "Python HTTP for Humans." -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -category = "main" -optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" - -[[package]] -name = "snowballstemmer" -version = "2.2.0" -description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." 
-category = "main" -optional = true -python-versions = "*" - -[[package]] -name = "sphinx" -version = "7.2.6" -description = "Python documentation generator" -category = "main" -optional = true -python-versions = ">=3.9" - -[package.dependencies] -alabaster = ">=0.7,<0.8" -babel = ">=2.9" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.18.1,<0.21" -imagesize = ">=1.3" -importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} -Jinja2 = ">=3.0" -packaging = ">=21.0" -Pygments = ">=2.14" -requests = ">=2.25.0" -snowballstemmer = ">=2.0" -sphinxcontrib-applehelp = "*" -sphinxcontrib-devhelp = "*" -sphinxcontrib-htmlhelp = ">=2.0.0" -sphinxcontrib-jsmath = "*" -sphinxcontrib-qthelp = "*" -sphinxcontrib-serializinghtml = ">=1.1.9" - -[package.extras] -docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=3.5.0)", "flake8-simplify", "isort", "ruff", "mypy (>=0.990)", "sphinx-lint", "docutils-stubs", "types-requests"] -test = ["pytest (>=4.6)", "html5lib", "cython (>=3.0)", "setuptools (>=67.0)", "filelock"] - -[[package]] -name = "sphinx-removed-in" -version = "0.2.2" -description = "versionremoved and removed-in directives for Sphinx" -category = "main" -optional = true -python-versions = "*" - -[package.dependencies] -Sphinx = "*" - -[[package]] -name = "sphinxcontrib-applehelp" -version = "1.0.7" -description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" -category = "main" -optional = true -python-versions = ">=3.9" - -[package.dependencies] -Sphinx = ">=5" - -[package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-devhelp" -version = "1.0.5" -description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents" -category = "main" -optional = true -python-versions = ">=3.9" - -[package.dependencies] -Sphinx = ">=5" - -[package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test 
= ["pytest"] - -[[package]] -name = "sphinxcontrib-htmlhelp" -version = "2.0.4" -description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -category = "main" -optional = true -python-versions = ">=3.9" - -[package.dependencies] -Sphinx = ">=5" - -[package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest", "html5lib"] - -[[package]] -name = "sphinxcontrib-jsmath" -version = "1.0.1" -description = "A sphinx extension which renders display math in HTML via JavaScript" -category = "main" -optional = true -python-versions = ">=3.5" - -[package.extras] -test = ["pytest", "flake8", "mypy"] - -[[package]] -name = "sphinxcontrib-qthelp" -version = "1.0.6" -description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" -category = "main" -optional = true -python-versions = ">=3.9" - -[package.dependencies] -Sphinx = ">=5" - -[package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-serializinghtml" -version = "1.1.9" -description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" -category = "main" -optional = true -python-versions = ">=3.9" - -[package.dependencies] -Sphinx = ">=5" - -[package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-trio" -version = "1.1.2" -description = "Make Sphinx better at documenting Python functions and methods" -category = "main" -optional = true -python-versions = "*" - -[package.dependencies] -sphinx = ">=1.7" - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -category = "main" -optional = true -python-versions = ">=3.7" - -[[package]] -name = "tox" -version = "4.11.3" -description = "tox is a generic virtualenv management and test command line tool" -category = "main" -optional = true -python-versions = ">=3.8" - -[package.dependencies] 
-cachetools = ">=5.3.1" -chardet = ">=5.2" -colorama = ">=0.4.6" -filelock = ">=3.12.3" -packaging = ">=23.1" -platformdirs = ">=3.10" -pluggy = ">=1.3" -pyproject-api = ">=1.6.1" -tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} -virtualenv = ">=20.24.3" - -[package.extras] -docs = ["furo (>=2023.8.19)", "sphinx-argparse-cli (>=1.11.1)", "sphinx-autodoc-typehints (>=1.24)", "sphinx-copybutton (>=0.5.2)", "sphinx-inline-tabs (>=2023.4.21)", "sphinx (>=7.2.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -testing = ["build[virtualenv] (>=0.10)", "covdefaults (>=2.3)", "detect-test-pollution (>=1.1.1)", "devpi-process (>=1)", "diff-cover (>=7.7)", "distlib (>=0.3.7)", "flaky (>=3.7)", "hatch-vcs (>=0.3)", "hatchling (>=1.18)", "psutil (>=5.9.5)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-xdist (>=3.3.1)", "pytest (>=7.4)", "re-assert (>=1.1)", "time-machine (>=2.12)", "wheel (>=0.41.2)"] - -[[package]] -name = "urllib3" -version = "2.0.7" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-category = "main" -optional = true -python-versions = ">=3.7" - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "virtualenv" -version = "20.24.6" -description = "Virtual Python Environment builder" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -distlib = ">=0.3.7,<1" -filelock = ">=3.12.2,<4" -platformdirs = ">=3.9.1,<4" - -[package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx-argparse (>=0.4)", "sphinx (>=7.1.2)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage-enable-subprocess (>=1)", "coverage (>=7.2.7)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "pytest (>=7.4)", "setuptools (>=68)", "time-machine (>=2.10)"] - -[[package]] -name = "zipp" -version = "3.17.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" -optional = true -python-versions = ">=3.8" - -[package.extras] -docs = ["sphinx (>=3.5)", "sphinx (<7.2.5)", "jaraco.packaging (>=9.3)", "rst.linker (>=1.9)", "furo", "sphinx-lint", "jaraco.tidelift (>=1.4)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ruff", "jaraco.itertools", "jaraco.functools", "more-itertools", "big-o", "pytest-ignore-flaky", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"] - -[extras] -docs = ["Sphinx", "sphinx-removed-in", "sphinxcontrib-trio", "pallets-sphinx-themes"] -test = ["pytest", "pytest-cov", "tox", "python-sat", "argparse"] - -[metadata] -lock-version = "1.1" -python-versions = "^3.9" -content-hash = 
"801b2f7c9341048d09cbe7ae06b892b1d44b0ba354a7abe5b27419bb2bc13383" - -[metadata.files] -alabaster = [] -argparse = [] -babel = [] -cachetools = [] -certifi = [] -chardet = [] -charset-normalizer = [] -colorama = [] -coverage = [] -distlib = [] -docutils = [] -exceptiongroup = [] -filelock = [] -idna = [] -imagesize = [] -importlib-metadata = [] -iniconfig = [] -jinja2 = [] -markupsafe = [] -packaging = [] -pallets-sphinx-themes = [] -platformdirs = [] -pluggy = [] -pygments = [] -pyparsing = [] -pyproject-api = [] -pytest = [] -pytest-cov = [] -python-sat = [] -requests = [] -six = [] -snowballstemmer = [] -sphinx = [] -sphinx-removed-in = [] -sphinxcontrib-applehelp = [] -sphinxcontrib-devhelp = [] -sphinxcontrib-htmlhelp = [] -sphinxcontrib-jsmath = [] -sphinxcontrib-qthelp = [] -sphinxcontrib-serializinghtml = [] -sphinxcontrib-trio = [] -tomli = [] -tox = [] -urllib3 = [] -virtualenv = [] -zipp = [] diff --git a/pygplib/__init__.py b/pygplib/__init__.py index 2ed8428..a5d2223 100644 --- a/pygplib/__init__.py +++ b/pygplib/__init__.py @@ -12,6 +12,7 @@ from .cnf import Cnf from .ecc import Ecc from .name import NameMgr +from .absexpr import IndexGen __all__ = [ "Prop", @@ -22,4 +23,5 @@ "BaseRelSt", "SymRelSt", "op", + "IndexGen", ] diff --git a/pygplib/absexpr.py b/pygplib/absexpr.py index 4f4800f..84a6459 100644 --- a/pygplib/absexpr.py +++ b/pygplib/absexpr.py @@ -30,6 +30,7 @@ def get_count(self) -> int: def clear(self, init_index: int = 1) -> None: """Clears index with init_index.""" + self._init = init_index self._next = init_index @@ -55,8 +56,6 @@ class AbsExpr: _COMMA: string to represent comma. _LPAREN: string to represent left parentheses. _RPAREN: string to represent right parentheses. - _ATOM_LEN: length of atom part in aux field. - _AUX_LEN: length of aux field. 
Note: The current implementation, for the sake of simplicity, @@ -78,9 +77,6 @@ class AbsExpr: _LPAREN = "(" _RPAREN = ")" - _ATOM_LEN = 0 - _AUX_LEN = _ATOM_LEN - @classmethod def get_lparen_tag(cls) -> str: """Gets tag of left parenthesis.""" @@ -98,15 +94,15 @@ def get_comma_tag(cls) -> str: # Constructor-related Methods @staticmethod - def _to_key(tag: str, left: "AbsExpr", right: "AbsExpr", aux: tuple) -> str: + def _to_key(tag: str, left: "AbsExpr", right: "AbsExpr", aux: tuple, cls_name: str) -> str: """Makes key to identify expression.""" - tup = (tag, id(left), id(right)) + aux + tup = (tag, id(left), id(right)) + aux + (cls_name,) return ",".join(map(str, tup)) @classmethod - def _normalize_aux(cls, aux: tuple) -> tuple: + def _normalize_aux(cls, tag: str, aux: tuple) -> tuple: """This method might be overridden to normalize atom operands' order""" - return (0,) * cls._AUX_LEN if aux == () else aux + return aux def __new__( cls, tag: str, left: "AbsExpr" = None, right: "AbsExpr" = None, aux: tuple = () @@ -122,8 +118,10 @@ def __new__( right: right operand aux: information regarding bound variable and atom """ - aux = cls._normalize_aux(aux) - key = cls._to_key(tag, left, right, aux) + aux = cls._normalize_aux(tag, aux) + # Add class name to distinguish between formulas of different classes: + # for instance, Prop.true_const() from Fog.true_const(). 
+ key = cls._to_key(tag, left, right, aux, cls_name = cls.__name__) if key in cls._unique_table: return cls._unique_table[key] @@ -156,14 +154,32 @@ def get_operand(self, i: int) -> "AbsExpr": return self._left if i == 1 else self._right def is_atom_term(self) -> bool: + """Is it an atomic formula ?""" + warn_msg = "`is_atom_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_atom() + + def is_atom(self) -> bool: """Is it an atomic formula ?""" return self.get_tag() in type(self)._ATOM_TAGS def is_unop_term(self) -> bool: + """Is the top-most operator a unary operation ?""" + warn_msg = "`is_unop_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_unop() + + def is_unop() -> bool: """Is the top-most operator a unary operation ?""" return self.get_tag() in type(self)._UNOP_TAGS def is_binop_term(self) -> bool: + """Is the top-most operator a binary operation ?""" + warn_msg = "`is_binop_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_binop() + + def is_binop(self) -> bool: """Is the top-most operator a binary operation ?""" return self.get_tag() in type(self)._BINOP_TAGS @@ -175,7 +191,8 @@ def gen_key(self) -> str: (as keys for _unique_table) to decide whether formulas are syntatically identical. """ - return type(self)._to_key(self._tag, self._left, self._right, self._aux) + return type(self)._to_key(self._tag, self._left, self._right,\ + self._aux, cls_name=type(self).__name__) def compute_nnf_step(self, negated: bool, s: list[list], t: list[list])\ -> None: diff --git a/pygplib/absfo.py b/pygplib/absfo.py index e9a03b8..7e80d25 100644 --- a/pygplib/absfo.py +++ b/pygplib/absfo.py @@ -9,6 +9,15 @@ class AbsFo(AbsProp): """First-order logic of graphs with true and false and no other atoms. 
+ :: + + Node | self._aux[0] | self._aux[1] + ---------------|----------------|-------------- + True constant | - | - + False constant | - | - + Quantifier | index of | - + | bound variable | + Note: In the current implementation, syntactically identical subexpressions are shared. We have to keep it in mind that this might become source of @@ -31,8 +40,6 @@ class AbsFo(AbsProp): _UNOP_TAGS: tuple of strings, representing types of uniary operator. _QF_TAGS: tuple of strings, representing types of quantifier. _EXPR_TAGS: tuple of available tags in this class. - _BOUND_VAR_POS: beginning position of the field of bound variable. - _AUX_LEN: length of aux field. """ # Tag-related Variables and Methods @@ -46,9 +53,6 @@ class AbsFo(AbsProp): _EXPR_TAGS = _ATOM_TAGS + _BINOP_TAGS + _UNOP_TAGS + _QF_TAGS - _BOUND_VAR_POS = AbsProp._AUX_LEN - _AUX_LEN = 1 + AbsProp._AUX_LEN - @classmethod def get_forall_tag(cls) -> str: """Gets tag of forall.""" @@ -102,19 +106,37 @@ def qf(cls, tag: str, left: AbsExpr, bvar: int) -> AbsExpr: # Instance Methods def get_bound_var(self) -> int: """Gets the index of bound variable.""" - if not self.is_qf_term(): + if not self.is_qf(): raise Exception("The top-most operator of the formula is not quantifier.") - return self._aux[type(self)._BOUND_VAR_POS] + return self._aux[0] def is_forall_term(self) -> bool: + """Is the top-most operator the universal quantifier ?""" + warn_msg = "`is_forall_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_forall() + + def is_forall(self) -> bool: """Is the top-most operator the universal quantifier ?""" return self.get_tag() == type(self)._FORALL def is_exists_term(self) -> bool: + """Is the top-most operator the existential quantifier ?""" + warn_msg = "`is_exists_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_exists() + + def is_exists(self) -> bool: """Is the top-most 
operator the existential quantifier ?""" return self.get_tag() == type(self)._EXISTS def is_qf_term(self) -> bool: + """Is the top-most operator universal or existential quantifier ?""" + warn_msg = "`is_qf_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_qf() + + def is_qf(self) -> bool: """Is the top-most operator universal or existential quantifier ?""" return self.get_tag() in type(self)._QF_TAGS @@ -123,7 +145,7 @@ def compute_nnf_step(self, negated: bool, s: list[list], t: list[list]) -> None: f = self.get_operand(1) - if self.is_forall_term(): # not ! f = ? not f + if self.is_forall(): # not ! f = ? not f bvar = self.get_bound_var() t.append( [ @@ -136,7 +158,7 @@ def compute_nnf_step(self, negated: bool, s: list[list], t: list[list]) -> None: s.append([negated, f]) return - if self.is_exists_term(): # not ? f = ! not f + if self.is_exists(): # not ? f = ! not f bvar = self.get_bound_var() t.append( [ @@ -155,7 +177,7 @@ def get_free_vars_and_consts_pre_step( self, bound_vars: list, free_vars: list ) -> None: """Performs computation in prefix order for this object.""" - if self.is_forall_term() or self.is_exists_term(): + if self.is_forall() or self.is_exists(): bound_vars.append(self.get_bound_var()) return @@ -163,14 +185,14 @@ def get_free_vars_and_consts_post_step( self, bound_vars: list, free_vars: list ) -> None: """Performs computation in postfix order for this object.""" - if self.is_forall_term() or self.is_exists_term(): + if self.is_forall() or self.is_exists(): bound_vars.remove(self.get_bound_var()) return def substitute_step(self, y: int, x: int, assoc: dict) -> None: """Performs substitution for this object.""" - if self.is_neg_term(): + if self.is_neg(): g = assoc[id(self.get_operand(1))] assoc[id(self)] = type(self).neg(g) return @@ -180,17 +202,17 @@ def substitute_step(self, y: int, x: int, assoc: dict) -> None: return if ( - self.is_land_term() - or self.is_lor_term() - or 
self.is_implies_term() - or self.is_iff_term() + self.is_land() + or self.is_lor() + or self.is_implies() + or self.is_iff() ): left = assoc[id(self.get_operand(1))] right = assoc[id(self.get_operand(2))] assoc[id(self)] = type(self).binop(self.get_tag(), left, right) return - if self.is_forall_term() or self.is_exists_term(): + if self.is_forall() or self.is_exists(): bvar = self.get_bound_var() if bvar == x: assoc[id(self)] = self @@ -204,7 +226,7 @@ def substitute_step(self, y: int, x: int, assoc: dict) -> None: def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: """Performs reduce compuation for this object.""" - if self.is_forall_term(): + if self.is_forall(): bvar = self.get_bound_var() g = assoc[id(self.get_operand(1))] @@ -226,7 +248,7 @@ def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: assoc[id(self)] = type(self).forall(g, bvar) return - if self.is_exists_term(): + if self.is_exists(): bvar = self.get_bound_var() g = assoc[id(self.get_operand(1))] @@ -252,10 +274,10 @@ def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: def make_str_pre_step(self) -> str: """Makes string in prefix order for this object.""" - if self.is_forall_term() or self.is_exists_term(): + if self.is_forall() or self.is_exists(): out = "(" out += ( \ - type(self).get_forall_tag() if self.is_forall_term() \ + type(self).get_forall_tag() if self.is_forall() \ else type(self).get_exists_tag()\ ) out += " " @@ -266,20 +288,20 @@ def make_str_pre_step(self) -> str: def make_str_in_step(self) -> str: """Makes string in infix order for this object.""" - if self.is_forall_term() or self.is_exists_term(): + if self.is_forall() or self.is_exists(): return "" return super().make_str_in_step() def make_str_post_step(self) -> str: """Makes string in postfix order for this object.""" - if self.is_forall_term() or self.is_exists_term(): + if self.is_forall() or self.is_exists(): return ")" return super().make_str_post_step() def 
make_node_str_step(self) -> str: """Makes string of this object for DOT print.""" - if self.is_forall_term() or self.is_exists_term(): + if self.is_forall() or self.is_exists(): bvar = self.get_bound_var() name = NameMgr.lookup_name(bvar) - return f'"! [{name}] :"' if self.is_forall_term() else f'"? [{name}] :"' + return f'"! [{name}] :"' if self.is_forall() else f'"? [{name}] :"' return super().make_node_str_step() diff --git a/pygplib/absneg.py b/pygplib/absneg.py index ad84a2c..6441e98 100644 --- a/pygplib/absneg.py +++ b/pygplib/absneg.py @@ -7,6 +7,13 @@ class AbsNeg(AbsExpr): """Class of abstract expressions with true and false and negation only. + :: + + Node | self._aux[0] | self._aux[1] + ---------------|----------------|-------------- + True constant | - | - + False constant | - | - + Attributes: _NEG: string, representing logical negation. _TRUE_CONST: string, representing true. @@ -65,25 +72,31 @@ def false_const(cls) -> AbsExpr: # Instance Methods def is_neg_term(self) -> bool: + """Is the top-most operator the logical negation ?""" + warn_msg = "`is_neg_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_neg() + + def is_neg(self) -> bool: """Is the top-most operator the logical negation ?""" return self.get_tag() == type(self)._NEG def is_true_atom(self) -> bool: - """Is it the true atom ?""" + """Is it the true constant atom ?""" return self.get_tag() == type(self)._TRUE_CONST def is_false_atom(self) -> bool: - """Is it the false atom ?""" + """Is it the false constant atom ?""" return self.get_tag() == type(self)._FALSE_CONST def is_const_atom(self) -> bool: - """Is it either the true atom or the false atom ?""" + """Is it either the true constant atom or the false constant atom ?""" return self.is_true_atom() or self.is_false_atom() def compute_nnf_step(self, negated: bool, s: list[list], t: list[list]) -> None: """Perform NNF computation for this object.""" - if self.is_neg_term(): # 
not not f = f + if self.is_neg(): # not not f = f f = self.get_operand(1) s.append([not negated, f]) t.append([1, lambda x: x]) @@ -101,7 +114,7 @@ def compute_nnf_step(self, negated: bool, s: list[list], t: list[list]) -> None: def compute_cnf_step(self, igen: IndexGen, \ assoc: dict, cnf: list) -> None: """Performs CNF computation for this object.""" - if self.is_neg_term(): # a <-> -b : (-a or -b) and (a or b) + if self.is_neg(): # a <-> -b : (-a or -b) and (a or b) a = igen.get_next() b = assoc[id(self.get_operand(1))] @@ -118,7 +131,7 @@ def compute_cnf_step(self, igen: IndexGen, \ def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: """Performs reduce computation for this object.""" - if self.is_neg_term(): + if self.is_neg(): g = assoc[id(self.get_operand(1))] if g.is_true_atom(): assoc[id(self)] = type(self).false_const() @@ -135,7 +148,7 @@ def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: def make_str_pre_step(self) -> str: """Makes string in prefix order for this object.""" - if self.is_neg_term(): + if self.is_neg(): out = "(" + type(self).get_neg_tag() + " " return out if self.is_true_atom(): @@ -146,7 +159,7 @@ def make_str_pre_step(self) -> str: def make_str_in_step(self) -> str: """Makes string in infix order for this object.""" - if self.is_neg_term(): + if self.is_neg(): return "" if self.is_true_atom(): return "" @@ -156,7 +169,7 @@ def make_str_in_step(self) -> str: def make_str_post_step(self) -> str: """Makes string in postfix order for this object.""" - if self.is_neg_term(): + if self.is_neg(): return ")" if self.is_true_atom(): return "" @@ -166,7 +179,7 @@ def make_str_post_step(self) -> str: def make_node_str_step(self) -> str: """Makes string of this object for DOT print.""" - if self.is_neg_term(): + if self.is_neg(): return "NOT" if self.is_true_atom(): return "T" diff --git a/pygplib/absprop.py b/pygplib/absprop.py index 44fde47..e1e4778 100644 --- a/pygplib/absprop.py +++ b/pygplib/absprop.py @@ -12,6 
+12,13 @@ class AbsProp(AbsNeg): """Expression of Propositional Logic with true and false and no variable + :: + + Node | self._aux[0] | self._aux[1] + ---------------|----------------|-------------- + True constant | - | - + False constant | - | - + Attributes: _LAND: string, representing logical conjunction. _LOR: string, representing logical disjunction. @@ -162,18 +169,42 @@ def bitwise_binop(cls, tag: str, left_li: list, right_li: list) -> AbsExpr: # Instance Methods def is_land_term(self) -> bool: + """Is the top-most operator logical conjunction ?""" + warn_msg = "`is_land_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_land() + + def is_land(self) -> bool: """Is the top-most operator logical conjunction ?""" return self.get_tag() == type(self)._LAND def is_lor_term(self) -> bool: + """Is the top-most operator logical disjunction ?""" + warn_msg = "`is_lor_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_lor() + + def is_lor(self) -> bool: """Is the top-most operator logical disjunction ?""" return self.get_tag() == type(self)._LOR def is_implies_term(self) -> bool: + """Is the top-most operator logical implication ?""" + warn_msg = "`is_implies_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_implies() + + def is_implies(self) -> bool: """Is the top-most operator logical implication ?""" return self.get_tag() == type(self)._IMPLIES def is_iff_term(self) -> bool: + """Is the top-most operator logical equivalence ?""" + warn_msg = "`is_iff_term()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.is_iff() + + def is_iff(self) -> bool: """Is the top-most operator logical equivalence ?""" return self.get_tag() == type(self)._IFF @@ -183,7 +214,7 @@ def compute_nnf_step(self, negated: bool, s: list[list], t: 
list[list]) -> None: f = self.get_operand(1) g = self.get_operand(2) - if self.is_land_term(): + if self.is_land(): if negated: # not (f and g) = not f or not g t.append([2, lambda y, z: type(self).lor(y, z)]) @@ -197,7 +228,7 @@ def compute_nnf_step(self, negated: bool, s: list[list], t: list[list]) -> None: s.append([False, g]) return - if self.is_lor_term(): + if self.is_lor(): if negated: # not (f or g) = not f and not g t.append([2, lambda y, z: type(self).land(y, z)]) @@ -211,7 +242,7 @@ def compute_nnf_step(self, negated: bool, s: list[list], t: list[list]) -> None: s.append([False, g]) return - if self.is_implies_term(): + if self.is_implies(): if negated: # not (f -> g) = f and not g t.append([2, lambda y, z: type(self).land(y, z)]) @@ -225,7 +256,7 @@ def compute_nnf_step(self, negated: bool, s: list[list], t: list[list]) -> None: s.append([False, g]) return - if self.is_iff_term(): + if self.is_iff(): if negated: # not (f <-> g) = not (f -> g) or not (g -> f) t.append([2, lambda y, z: type(self).lor(y, z)]) @@ -246,7 +277,7 @@ def compute_cnf_step(self, igen: IndexGen, \ """Performs CNF computation for this object.""" # a <-> b and c: (-a or b) and (-a or c) and (a or -b or -c) - if self.is_land_term(): + if self.is_land(): a = igen.get_next() b = assoc[id(self.get_operand(1))] c = assoc[id(self.get_operand(2))] @@ -259,7 +290,7 @@ def compute_cnf_step(self, igen: IndexGen, \ return # a <-> b or c: (a or -c) and (a or -b) and (-a or b or c) - if self.is_lor_term(): + if self.is_lor(): a = igen.get_next() b = assoc[id(self.get_operand(1))] c = assoc[id(self.get_operand(2))] @@ -272,13 +303,13 @@ def compute_cnf_step(self, igen: IndexGen, \ return assert ( - not self.is_implies_term() and not self.is_iff_term() + not self.is_implies() and not self.is_iff() ), "compute_cnf_step() assumes reduced formulas" super().compute_cnf_step(igen, assoc, cnf) def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: """Performs reduce computation for this 
object.""" - if self.is_land_term(): + if self.is_land(): left = assoc[id(self.get_operand(1))] right = assoc[id(self.get_operand(2))] @@ -305,7 +336,7 @@ def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: assoc[id(self)] = type(self).land(left, right) return - if self.is_lor_term(): + if self.is_lor(): left = assoc[id(self.get_operand(1))] right = assoc[id(self.get_operand(2))] @@ -332,50 +363,108 @@ def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: assoc[id(self)] = type(self).lor(left, right) return + if self.is_implies(): + left = assoc[id(self.get_operand(1))] + right = assoc[id(self.get_operand(2))] + + if left.is_true_atom(): # T -> right = right + assoc[id(self)] = right + return + + if left.is_false_atom(): # F -> right = T + assoc[id(self)] = type(self).true_const() + return + + if right.is_true_atom(): # left -> T = T + assoc[id(self)] = type(self).true_const() + return + + if right.is_false_atom(): # left -> F = ~left + assoc[id(self)] = type(self).neg(left) + return + + if left == right: # left -> left = T + assoc[id(self)] = type(self).true_const() + return + + # left -> right = ~left | right + assoc[id(self)] = type(self).lor(type(self).neg(left), right) + return + + if self.is_iff(): + left = assoc[id(self.get_operand(1))] + right = assoc[id(self.get_operand(2))] + + if left.is_true_atom(): # T <-> right = right + assoc[id(self)] = right + return + + if left.is_false_atom(): # F <-> right = ~right + assoc[id(self)] = type(self).neg(right) + return + + if right.is_true_atom(): # left <-> T = left + assoc[id(self)] = left + return + + if right.is_false_atom(): # left <-> F = ~left + assoc[id(self)] = type(self).neg(left) + return + + if left == right: # left <-> left = T + assoc[id(self)] = type(self).true_const() + return + + # left <-> right = (~left | right) & (left | ~right) + assoc[id(self)] = type(self).land( + type(self).lor(type(self).neg(left), right), + type(self).lor(left, type(self).neg(right))) + return + 
super().reduce_formula_step(assoc, st) def make_str_pre_step(self) -> str: """Makes string in prefix order for this object.""" if ( - self.is_land_term() - or self.is_lor_term() - or self.is_implies_term() - or self.is_iff_term() + self.is_land() + or self.is_lor() + or self.is_implies() + or self.is_iff() ): return "(" return super().make_str_pre_step() def make_str_in_step(self) -> str: """Makes string in infix order for this object.""" - if self.is_land_term(): + if self.is_land(): return " " + type(self).get_land_tag() + " " - if self.is_lor_term(): + if self.is_lor(): return " " + type(self).get_lor_tag() + " " - if self.is_implies_term(): + if self.is_implies(): return " " + type(self).get_implies_tag() + " " - if self.is_iff_term(): + if self.is_iff(): return " " + type(self).get_iff_tag() + " " return super().make_str_in_step() def make_str_post_step(self) -> str: """Makes string in postfix order for this object.""" if ( - self.is_land_term() - or self.is_lor_term() - or self.is_implies_term() - or self.is_iff_term() + self.is_land() + or self.is_lor() + or self.is_implies() + or self.is_iff() ): return ")" return super().make_str_post_step() def make_node_str_step(self) -> str: """Makes string of this object for DOT print.""" - if self.is_land_term(): + if self.is_land(): return "AND" - if self.is_lor_term(): + if self.is_lor(): return "OR" - if self.is_implies_term(): + if self.is_implies(): return "IMP" - if self.is_iff_term(): + if self.is_iff(): return "IFF" return super().make_node_str_step() diff --git a/pygplib/baserelst.py b/pygplib/baserelst.py index 25bf6f2..73da9e1 100644 --- a/pygplib/baserelst.py +++ b/pygplib/baserelst.py @@ -16,7 +16,8 @@ class BaseRelSt(Be): """ def __init__(self, \ - objects: tuple[int], codes: tuple[tuple[int]], length: int): + objects: tuple[int], codes: tuple[tuple[int]], length: int,\ + msg: str = ""): """Initializes an object of BaseRelSt class.""" self.domain = objects """tuple of objects (constant symbol indices)""" 
@@ -29,7 +30,7 @@ def __init__(self, \ for pos, tup in enumerate(self._codes): if tup in self._pos: raise Exception(f"the codes of position {self._pos[tup]} "\ - +f"and {pos} coincides: {self._codes}") + +f"and {pos} coincides: {self._codes}: "+msg) self._pos[tup] = pos super().__init__(length) @@ -55,33 +56,33 @@ def decode_assignment(self, assign: tuple[int]) -> dict[int, int]: """ var_symb_set = { - self.get_symbol_index(abs(x)) \ - for x in assign if self.exists_symbol(abs(x))\ + self.get_variable_position_pair(abs(x))[0] \ + for x in assign if self.is_decodable_boolean_var(abs(x))\ } dic = {} # dic to find set of code pos of value 1 from symbol index - for var in var_symb_set: - for bvar in self.get_boolean_var_list(var): - if (bvar in assign) and (-bvar in assign): - raise Exception(f"Conflicting assign w.r.t {bvar}") - if bvar in assign: - if var not in dic: - dic[var] = set() - pos = self.get_code_pos(bvar) - dic[var].add(pos+1) + for x in var_symb_set: + for px in self.get_boolean_var_list(x): + if (px in assign) and (-px in assign): + raise Exception(f"Conflicting assign w.r.t {px}") + if px in assign: + if x not in dic: + dic[x] = set() + pos = self.get_variable_position_pair(px)[1] + dic[x].add(pos+1) continue - if -bvar in assign: + if -px in assign: continue - raise Exception("Incomplete assign w.r.t {bvar}") + raise Exception(f"Incomplete assign w.r.t {px}: {NameMgr.lookup_name(x)}") result = {} - for var in var_symb_set: - if var not in dic: - dic[var] = set() - code = tuple(sorted(dic[var])) + for x in var_symb_set: + if x not in dic: + dic[x] = set() + code = tuple(sorted(dic[x])) if code not in self._pos: raise Exception("No matching element") - result[var] = self._pos_to_object(self._pos[code]) + result[x] = self._pos_to_object(self._pos[code]) return result diff --git a/pygplib/be.py b/pygplib/be.py index 13a0970..b9d1b26 100644 --- a/pygplib/be.py +++ b/pygplib/be.py @@ -1,51 +1,87 @@ -"""Class of Boolean encoding of first-order variables""" 
+"""Class of Boolean encoding of variables""" from .name import NameMgr class Be: - """Class of Boolean encoding of first-order variables + """Class of Boolean encoding of variables - A first-order variable is encoded with a sequence of Boolean variables of + A variable is encoded to a sequence of Boolean variables of fixed-length. - Each such Boolean variable is registered to NameMgr as name x@i, where x - is the name of the first-order variable from which the Boolean variable is - encoded and i is the position. """ def __init__(self, length: int): """Initializes an object of Be class""" self.code_length = length + """length of boolean encoding of a variable""" + self._be_encode_dict = {} + """dictionary mapping variable-position pair to Boolean variable""" + self._be_decode_dict = {} + """dictionary mapping Boolean variable to variable-position pair""" def get_boolean_var(self, i: int, pos: int) -> int: - """Returns a Boolean variable index, given a symbol index and code pos. + """Returns a Boolean encoding of a variable at a given position. Args: - i: variable or constant symbol index + i: index of variable symbol pos: code position ranging from 0 to the code length minus 1. 
+ + Returns: + index of Boolean variable """ if not NameMgr.has_name(i): raise ValueError(f"{i} has no name") - if "@" in NameMgr.lookup_name(i): - raise Exception( - f"The name {NameMgr.lookup_name(i)} of {i} should not include @" - ) + if not NameMgr.is_variable(i): + raise ValueError(f"{i} is not a variable") if not 0 <= pos < self.code_length: raise IndexError( f"Position must range from 0 to {self.code_length-1}" ) - - name = NameMgr.lookup_name(i) + "@" + str(pos + 1) - return NameMgr.lookup_index(name) + if (i,pos) not in self._be_encode_dict: + res = NameMgr.get_aux_index() + assert not self.is_decodable_boolean_var(res) + self._be_encode_dict[(i,pos)] = res + self._be_decode_dict[res] = (i,pos) + return self._be_encode_dict[(i,pos)] def get_boolean_var_list(self, i: int) -> list[int]: - """Returns a list of Boolean variable indices, given a symbol index. + """Returns a Boolean encoding of a variable. Args: - i: variable or constant symbol index + i: index of variable symbol + + Returns: + list of Boolean variables """ return [self.get_boolean_var(i,pos) for pos in range(self.code_length)] + def get_variable_position_pair(self, k: int) -> tuple[int,int]: + """Decode a Boolean variable to a variable and a position. + + Args: + k: index of Boolean variable + + Returns: + index of a variable symbol and a position + """ + if not self.is_decodable_boolean_var(k): + raise ValueError(f"{k} undecodable") + return self._be_decode_dict[k] + + def is_decodable_boolean_var(self, k: int) -> bool: + """Is it a decodable Boolean variable? + + Args: + k: index of Boolean variable + + Returns: + bool: returns True if for some i, k == get_boolean_var(i, j). + + Note: + auxiliary propositional variables are not decodable. + """ + return k in self._be_decode_dict + def get_symbol_index(self, k: int) -> int: """Returns symbol index, given Boolean variable index. 
@@ -55,12 +91,9 @@ def get_symbol_index(self, k: int) -> int: Returns: int: variable or constant symbol index """ - if not self.exists_symbol(k): - raise ValueError(f"No symbol is linked to {k}") - - name = NameMgr.lookup_name(k) - res = name.split("@") - return NameMgr.lookup_index(res[0]) + warn_msg = "`get_symbol_index()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.get_variable_position_pair(k)[0] def get_code_pos(self, k: int) -> int: """Returns code position, given Boolean variable index. @@ -71,12 +104,9 @@ def get_code_pos(self, k: int) -> int: Returns: int: code position ranging from 0 to the code length minus 1. """ - if not self.exists_symbol(k): - raise ValueError(f"No symbol is linked to {k}") - - name = NameMgr.lookup_name(k) - res = name.split("@") - return int(res[1]) - 1 + warn_msg = "`get_code_pos()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.get_variable_position_pair(k)[1] def exists_symbol(self, k: int) -> bool: """Answers whether there exists symbol index, given Boolean var. index. @@ -87,9 +117,9 @@ def exists_symbol(self, k: int) -> bool: Returns: bool: returns True if for some i, k == get_boolean_var(i, j). """ - name = NameMgr.lookup_name(k) - res = name.split("@") - return len(res) == 2 and NameMgr.has_index(res[0]) and res[1].isdigit() + warn_msg = "`exists_symbol()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + raise Exception def exists_boolean_var(self, i: int, pos: int) -> bool: """Answers whether there is a Boolean var k, given symbol index and pos. @@ -101,8 +131,6 @@ def exists_boolean_var(self, i: int, pos: int) -> bool: Returns: bool: returns True if for some k, k == get_boolean_var(i, pos). 
""" - if not NameMgr.has_name(i): - raise ValueError(f"No symbol is linked to {i}") - - name = NameMgr.lookup_name(i) + "@" + str(pos + 1) - return NameMgr.has_index(name) + warn_msg = "`exists_boolean_var()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + raise Exception diff --git a/pygplib/cnf.py b/pygplib/cnf.py index a14b184..b0f7b93 100644 --- a/pygplib/cnf.py +++ b/pygplib/cnf.py @@ -1,127 +1,162 @@ """CNF Converter Class""" import sys -from typing import overload +from typing import Iterable from .name import NameMgr from .baserelst import BaseRelSt +from .absexpr import IndexGen from . import op class Cnf: """CNF Converter Class""" - @overload - def __init__(self, expr_li: list, st: BaseRelSt = None): - pass - - @overload - def __init__(self, expr_tup: tuple, st: BaseRelSt = None): - pass - - def __init__(self, exprs, st = None): + def __init__(self, li: Iterable, + st: BaseRelSt = None, + check_cnf: bool = False): """Constructs CNF from tuple/list of formulas with Tseitin transformation. Note: - A tuple/list of propositional formulas is considered as conjunction + A list of propositional formulas is considered as conjunction of propositional formulas. 
Args: - exprs: tuple/list of propositional formulas - st: relational structure object (If given, coding information will - be added to the header of DIMACS CNF) + li: list of propositional formulas + st: relational structure object """ - self.st = st - """structure (optional)""" - - self._dic = {} - """dict to find encoded index from original index.""" - self._inv_list = [] - """list to find original index from encoded index.""" - - self._expr_tup = tuple(exprs) - """tuple of propositional formulas""" - base, naux, cnf = op.compute_cnf(self._expr_tup) - - if cnf == (): - self._nvar = 0 - self._ncls = 0 - self._packed_cnf = () + self._st = st + """relational structure""" + self._orig_base, naux, self._orig_cnf = op.compute_cnf(li) + self._enc = {} + """dictionary of encoded indices""" + self._dec = {} + """dictionary of decoded indices""" + self.base = 0 + """number of decodable variables""" + for clause in self._orig_cnf: + for lit in clause: + var = abs(lit) + if self._st is not None\ + and var <= self._orig_base\ + and self._st.is_decodable_boolean_var(var)\ + and var not in self._enc: + self.base += 1 + self._enc[var] = self.base + self._dec[self.base] = var + self._idgen = IndexGen(init_index=self.base+1) + """index generator for undecodable variables""" + # exceptional cases + if self._orig_cnf == (): + self.nvar = 0 + self.cnf = () return - - if cnf == ((),): - self._nvar = 0 - self._ncls = 1 - self._packed_cnf = ((),) + if self._orig_cnf == ((),): + self.nvar = 0 + self.cnf = ((),) return - - result = [tuple([self.encode_lit(x) for x in cls]) for cls in cnf] - self._nvar = max([max(map(abs, cls)) for cls in result]) - """number of variables""" - assert self._nvar <= base + naux - - self._ncls = len(result) - """number of clauses in CNF""" - self._packed_cnf = tuple(result) + # Now, encode orig_cnf. 
+ self.cnf = tuple(\ + [tuple([self._encode_lit(x) for x in clause])\ + for clause in self._orig_cnf]\ + ) """CNF, where variables are renumbered so that all indices from 1 to - _nvar appear at least once in CNF.""" - self._base = base - """maximum index of variables appearing in input formulas.""" - - def encode_lit(self, lit: int) -> int: - """Encodes literal. + nvar appear at least once and all indices from 1 to base are decodable.""" + self.nvar = self.base + self._idgen.get_count() + """maximum variable index""" + if check_cnf: + self._check_cnf() + + def _check_cnf(self): + # check nvar + max_var = 0 + for clause in self.cnf: + for lit in clause: + var = abs(lit) + max_var = var if max_var < var else max_var + assert max_var == self.nvar, f"{max_var} == {self.nvar}" + # check no missing variable + checked_var = [False]*(self.nvar+1) + for clause in self.cnf: + for lit in clause: + var = abs(lit) + checked_var[var] = True + assert False not in checked_var[1:] + # check base + if self._st is not None: + for i in range(1,self.nvar+1): + if i <= self.base: + assert self._dec[i] <= self._orig_base\ + and self._st.is_decodable_boolean_var(self._dec[i]) + else: + assert self._dec[i] > self._orig_base\ + or not self._st.is_decodable_boolean_var(self._dec[i]) + # check encoding + decoded_cnf =\ + [tuple([self._decode_lit(x) for x in clause]) for clause in self.cnf] + assert len(decoded_cnf) == len(self._orig_cnf) + for i in range(len(decoded_cnf)): + assert decoded_cnf[i] == self._orig_cnf[i] + + def _encode_lit(self, lit: int) -> int: + """Encodes literal of internal variable to external one. Args: - lit: literal of an internal CNF variable. + lit: literal of internal CNF variable. Returns: - int: returns a literal of an external CNF variable. + literal of external CNF variable. 
""" var = abs(lit) - if var not in self._dic: - self._inv_list.append(var) - self._dic[var] = len(self._inv_list) - - res = self._dic[var] - return res if lit > 0 else -res - - def decode_lit(self, lit: int) -> int: - """Decodes literal. + if self._st is not None\ + and var <= self._orig_base\ + and self._st.is_decodable_boolean_var(var): + assert var in self._enc + assert self._dec[self._enc[var]] == var + else: + if var not in self._enc: + ext_var = self._idgen.get_next() + assert self.base < ext_var + self._enc[var] = ext_var + self._dec[ext_var] = var + ext_var = self._enc[var] + return ext_var if lit > 0 else -ext_var + + def _decode_lit(self, lit: int) -> int: + """Decodes literal of external variable to internal one. Args: - lit: literal of an external CNF variable + lit: literal of external CNF variable Returns: - int: literal of an internal CNF variable. + literal of internal CNF variable. """ var = abs(lit) - if not (0 < var <= len(self._inv_list)): - raise IndexError + int_var = self._dec[var] + return int_var if lit > 0 else -int_var - res = self._inv_list[var - 1] - return res if lit > 0 else -res - - def decode_assignment(self, assign: tuple[int]) -> tuple[int]: + def decode_assignment(self, assign: Iterable) -> tuple[int]: """Decodes assignment. Args: assign: assignment of external CNF variables Returns: - tuple: assignment of internal CNF variables (but excluding - auxilliary variables). + tuple: assignment of DECODATABLE internal CNF variables. 
""" - res = [self.decode_lit(x) for x in assign \ - if abs(self.decode_lit(x)) <= self._base] - return tuple(res) + return tuple([self._decode_lit(x) for x in assign if x <= self.base]) def get_nvar(self) -> int: """Gets the number of variables in CNF.""" - return self._nvar + warn_msg = "`get_nvar()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.nvar def get_ncls(self) -> int: """Gets the number of clauses in CNF.""" - return self._ncls + warn_msg = "`get_ncls()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return len(self.cnf) def get_clause(self, pos: int) -> tuple[int]: """Gets clause of given position. @@ -129,42 +164,42 @@ def get_clause(self, pos: int) -> tuple[int]: Args: pos: position ranging from 0 to the number of clauses minus 1. """ - if not (0 <= pos < len(self._packed_cnf)): + warn_msg = "`get_clause()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + if not (0 <= pos < len(self.cnf)): raise IndexError + return self.cnf[pos] - return self._packed_cnf[pos] - - def write(self, stream=None) -> None: + def write(self, stream=None, format: str = "dimacs") -> None: """Write CNF in DIMACS CNF format. Args: stream: stream (stdout if not specified) to which CNF is written. 
+ format: file format of cnf """ + if self.nvar == 0 or len(self.cnf): + raise Exception("cnf has no variable or no clause.") if stream == None: stream = sys.stdout - - dom = [] - if self.st != None: - for pos, obj in enumerate(self.st.domain): - code = self.st.get_code(obj) + if format != "dimacs": + raise Exception(f"unknown format: {format}") + if self._st is not None: + dom = [] + for pos, obj in enumerate(self._st.domain): + code = self._st.get_code(obj) name = NameMgr.lookup_name(obj) - dom.append(f"c dom {name}: "+" ".join(map(str, code))) - - enc = [ - f"c enc {i+1} {NameMgr.lookup_name(self.decode_lit(i+1))}" - for i in range(self._nvar) - if self.decode_lit(i + 1) <= self._base - ] - body = [" ".join(map(str, cls)) + " 0" for cls in self._packed_cnf] - - if stream != None: - out = "" - out += f"p cnf {self._nvar} {len(self._packed_cnf)}\n" - for expr in self._expr_tup: - out += f"c expr {op.to_str(expr)}\n" - if self.st != None: - out += "\n".join(dom+enc+body) - else: - out += "\n".join(enc+body) - out += "\n" - stream.write(out) + dom.append(f"c domain {name}: "+" ".join(map(str, code))) + enc = [ + f"c enc {i+1} {NameMgr.lookup_name(self.decode_lit(i+1))}" + for i in range(self.nvar) + if self.decode_lit(i + 1) <= self.base + ] + body = [" ".join(map(str, clause)) + " 0" for clause in self.cnf] + out = "" + out += f"p cnf {self.nvar} {len(self.cnf)}\n" + if self._st is not None: + out += "\n".join(dom+enc+body) + else: + out += "\n".join(enc+body) + out += "\n" + stream.write(out) diff --git a/pygplib/constraints.py b/pygplib/constraints.py new file mode 100644 index 0000000..2a678ed --- /dev/null +++ b/pygplib/constraints.py @@ -0,0 +1,59 @@ +from typing import Iterable + +from .name import NameMgr + +def at_most_r(x: Iterable[int], r: int) -> str: + """Computes at-most-r constraint of O.Bailleux and Y.Boufkhad. + + See also TAOCP Vol.4, Fascicle 6, p.8. + + Example: + x1 + x2 + x3 <= 2 + Suppose that the indices of x1,x2,x3 are 1,2,3, respectively. 
+ > x = [1, 2, 3] + > r = 2 + > at_most_r(x,r) + """ + if len(x) == 0: + raise Exception("No variable given") + for i in x: + if not NameMgr.has_name(i): + raise Exception("found variable with no name.") + x = (0,) + tuple(x) + b_dict = {} + + def b(k: int, i: int) -> str: + if i == 0: + return "T" + if i == r+1: + return "F" + if (k,i) not in b_dict: + b_dict[(k,i)] = NameMgr.get_aux_index() + return f"{NameMgr.lookup_name(b_dict[(k,i)])}" + + n = len(x)-1 + if r >= n: + return "T" + if r < 0: + return "F" + if r == 0: + return " & ".join(["~"+NameMgr.lookup_name(x[i]) for i in range(1,len(x))]) + t = [0]*(2*n) + for k in range(2*n-1,n-1,-1): + t[k] = 1 + b_dict[(k,1)] = x[k-n+1] + for k in range(n-1,0,-1): + t[k] = min(r,t[2*k]+t[2*k+1]) + li = [] + for k in range(2,n): + for i in range(t[2*k]+1): + for j in range(t[2*k+1]+1): + if not 1 <= i+j <= t[k]+1: + continue + li.append(f"(~{b(2*k,i)} | ~{b(2*k+1,j)} | {b(k,i+j)})") + for i in range(t[2]+1): + for j in range(t[3]+1): + if i+j != r+1: + continue + li.append(f"(~{b(2,i)} | ~{b(3,j)})") + return " & ".join(li) diff --git a/pygplib/ecc.py b/pygplib/ecc.py index 57781d5..9c20db3 100644 --- a/pygplib/ecc.py +++ b/pygplib/ecc.py @@ -1,4 +1,5 @@ import random +from simpgraph import SimpGraph class Ecc: """Class for computing an edge clique cover. 
@@ -8,51 +9,41 @@ class Ecc: https://doi.org/10.1016/j.ic.2019.104464 """ - def __init__(self, vertex_list: list, edge_list: list): - if len(set(vertex_list)) != len(vertex_list): - raise Exception(f"duplicate vertex found: {vertex_list}") - if False in list(map(lambda x: x > 0, vertex_list)): - raise Exception(f"non-positive index found: {vertex_list}") + def __init__(self, vertex_list, edge_list): + for v in vertex_list: + if v < 0: + raise Exception() for e in edge_list: - if len(e) != 2 or e[0] == e[1]: - raise Exception(f"invalid edge: {e}") - if e[0] not in vertex_list\ - or e[1] not in vertex_list: - raise Exception(f"invalid vertex specified by edge: {e}") - - self._verts = tuple(vertex_list) - """tuple of vertices""" - self._edges = tuple([tuple(sorted(e)) for e in edge_list]) - """tuple of edges, where each edge is a sorted tuple.""" + if type(e) != tuple and len(e) != 2: + raise Exception() + if e[0] not in vertex_list or e[1] not in vertex_list: + raise Exception() + self._G = SimpGraph() + """graph""" + for v in vertex_list: + self._G.add_vertex(v) + for v,w in edge_list: + self._G.add_edge(v,w) self._invdic = {} - """dictionary to find the position of edge or vertex in self._edges""" - for pos, e in enumerate(self._edges): + """dictionary to find the position of edge or vertex in _G.all_edges""" + for pos, e in enumerate(self._G.all_edges()): assert e not in self._invdic self._invdic[e] = pos - for pos, v in enumerate(self._verts): + for pos, v in enumerate(self._G.all_vertices()): assert v not in self._invdic self._invdic[v] = pos - self._N = {} - """dictionary to find the neighborhood of a vertex""" - for v in self._verts: - self._N[v] = set() - for e in self._edges: - self._N[e[0]].add(e[1]) - self._N[e[1]].add(e[0]) - self.nof_isolated_verts = 0 - """number of isolated vertices""" - for v in self._verts: - if len(self._N[v]) == 0: - self.nof_isolated_verts += 1 - self.nof_isolated_edges = 0 - """number of isolated edges""" - for e in self._edges: 
- if len(self._N[e[0]]) == 1 and len(self._N[e[1]]) == 1: - self.nof_isolated_edges += 1 - - if self.nof_isolated_verts > 1: - raise Exception(f"number of isolated vertices must be at most one.") - if self.nof_isolated_edges > 0: + nof_isolated_verts = 0 + for v in self._G.all_vertices(): + if len(self._G.adj_vertices(v)) == 0: + nof_isolated_verts += 1 + nof_isolated_edges = 0 + for v,w in self._G.all_edges(): + if len(self._G.adj_vertices(v)) == 1\ + and len(self._G.adj_vertices(w)) == 1: + nof_isolated_edges += 1 + if nof_isolated_verts > 0: + raise Exception(f"isolated vertex is not allowed.") + if nof_isolated_edges > 0: raise Exception(f"isolated edge is not allowed.") def _select_uncovered_edge(self, U: list, variant: str = "r"): @@ -66,8 +57,7 @@ def _select_uncovered_edge(self, U: list, variant: str = "r"): vertices that are incident to the selected edge. """ if variant == "r": - i = random.randint(0,len(U)-1) - return U[i][0], U[i][1] + return random.choice(U) else: raise Exception(f"NotImplementedError: variant {variant}") @@ -87,8 +77,7 @@ def _extract_node(self, P: set, Q: list, variant: str = "r")\ return -1 if variant == "r": - pos = random.randint(0,len(P)-1) - return list(P)[pos] + return random.choice(list(P)) else: raise Exception(f"NotImplementedError: variant {variant}") @@ -103,11 +92,11 @@ def _find_clique_covering(self, u: int, v:int, U: list, \ variant: variant for the vertex extraction """ Q = [u,v] - P = self._N[u] & self._N[v] + P = set(self._G.adj_vertices(u)) & set(self._G.adj_vertices(v)) z = self._extract_node(P,Q,variant) while z != -1: Q.append(z) - P = P & self._N[z] + P = P & set(self._G.adj_vertices(z)) z = self._extract_node(P,Q,variant) return tuple(sorted(Q)) @@ -124,12 +113,10 @@ def _mark_all_edges_covered(self, Q: tuple, U: list, p: list) -> None: for i in range(len(Q)): for j in range(i+1,len(Q)): e = tuple(sorted([Q[i],Q[j]])) - assert e in self._invdic pos = self._invdic[e] if not p[pos] < len(U): # e is already 
covered. continue U[p[pos]] = U[-1] - assert U[-1] in self._invdic p[self._invdic[U[-1]]] = p[pos] p[pos] = len(U)-1 U.pop(-1) @@ -138,22 +125,22 @@ def compute_ecc(self, variant: str = "rr") -> list[list[int]]: """Computes an edge clique cover of a given graph.""" if len(variant) != 2: raise Exception(f"variant {variant} not defined") - C = [] # edge clique cover - U = [e for e in self._edges] + C = [] # edges remaining uncovered - p = [pos for pos in range(len(self._edges))] + U = list(self._G.all_edges()) # list to find the position of each edge in U. - # U[p[i]] == self._edges[i] holds if the i-th edge remains uncovered. + # U[p[i]] == all_edges[i] holds if the i-th edge remains uncovered. + p = list(range(len(self._G.all_edges()))) while U != []: - u,v = self._select_uncovered_edge(U,variant=variant[0]) + (u,v) = self._select_uncovered_edge(U,variant=variant[0]) Q = self._find_clique_covering(u,v,U,variant=variant[1]) C.append(Q) self._mark_all_edges_covered(Q,U,p) return tuple(C) - def _select_unseparated_edge(self, U: list, variant: str = "r"): - """Selects an unseparated edge at random. + def _select_unseparated_block(self, U: list, variant: str = "r"): + """Selects an unseparated block at random. Args: U: list of uncovered edges @@ -163,78 +150,64 @@ def _select_unseparated_edge(self, U: list, variant: str = "r"): vertices that are incident to the selected edge. """ if variant == "r": - i = random.randint(0,len(U)-1) - return U[i][0], U[i][1] + return random.choice(U) else: raise Exception(f"NotImplementedError: variant {variant}") - def _find_clique_separating(self, u: int, v:int, U: list, \ - variant: str = "r") -> tuple: - """Finds a clique separating a given edge. + def _find_clique_separating(self, S: set, variant: str = "r") -> tuple: + """Finds a clique separating a given block. 
Args: - u: one of the vertices that are adjacent with each other - v: the other vertex - U: list of uncovered edges + S: set of vertices of at least size 2 variant: variant for the vertex extraction """ - if len(self._N[u]) == 1: - Q = [v] - P = self._N[v] - {u} - else: - Q = [u] - P = self._N[u] - {v} + assert len(S) >= 2 + u = random.choice(list(S)) + v = random.choice(list(S-{u})) + # Find a clique separating u and v. + if len(self._G.adj_vertices(u)) == 1: + u,v = v,u + Q = [u] + P = set(self._G.adj_vertices(u)) - {v} assert len(P) > 0 z = self._extract_node(P,Q,variant) while z != -1: + assert z != v Q.append(z) - P = P & self._N[z] + P = P & set(self._G.adj_vertices(z)) z = self._extract_node(P,Q,variant) return tuple(sorted(Q)) - def _mark_all_edges_separated(self, Q: tuple, U: list, p: list) -> None: - """Marks all edges of Q as separated. - - Update U and p so that all edges in Q are marked as separated. + def _separate_blocks(self, Q: tuple, U: list) -> list: + """Separate blocks by a clique. Args: Q: clique - U: list of unseparated edges - p: list to find the position of each edge in U. + U: list of blocks """ - for u in Q: - for v in self._N[u] - set(Q): - e = tuple(sorted([u,v])) - assert e in self._invdic - pos = self._invdic[e] - if not p[pos] < len(U): # e is already separated.
- continue - U[p[pos]] = U[-1] - assert U[-1] in self._invdic - p[self._invdic[U[-1]]] = p[pos] - p[pos] = len(U)-1 - U.pop(-1) - + new_U = [] + while U != []: + S = U.pop() + T = [set(),set()] + for w in S: + if w in Q: + T[1].add(w) + else: + T[0].add(w) + for i in [0,1]: + if len(T[i]) > 1: + new_U.append(T[i]) + return new_U + def compute_separating_ecc(self, variant: str = "rr") -> list[list[int]]: """Computes a separating edge clique cover of a given graph.""" C = list(self.compute_ecc(variant=variant)) - - # separating edge clique cover - M = [set() for v in self._verts] - # cliques incident to each vertex - for pos, Q in enumerate(C): - for v in Q: - M[self._invdic[v]].add(pos) - U = [e for e in self._edges \ - if M[self._invdic[e[0]]] == M[self._invdic[e[1]]]] - # edges remaining unseparated - p = [pos for pos in range(len(self._edges))] - # list to find the position of each edge in U. - # U[p[i]] == self._edges[i] holds if the i-th edge remains unseparated. + U = [set(self._G.all_vertices())] + for Q in C: + U = self._separate_blocks(Q, U) while U != []: - u,v = self._select_unseparated_edge(U,variant=variant[0]) - assert tuple(sorted([u,v])) in self._edges - Q = self._find_clique_separating(u,v,U,variant=variant[1]) + S = self._select_unseparated_block(U,variant=variant[0]) + Q = self._find_clique_separating(S,variant=variant[1]) C.append(Q) - self._mark_all_edges_separated(Q,U,p) + U = self._separate_blocks(Q,U) return tuple(C) diff --git a/pygplib/fog.py b/pygplib/fog.py index 9716121..673c39c 100644 --- a/pygplib/fog.py +++ b/pygplib/fog.py @@ -2,6 +2,8 @@ import warnings +from functools import cmp_to_key + import pyparsing as pp # Enables "packrat" parsing, which speedup parsing. # See https://pyparsing-docs.readthedocs.io/en/latest/pyparsing.html . 
@@ -16,6 +18,16 @@ class Fog(AbsFo): """Expression of First-order logic of graphs + :: + + Node | self._aux[0] | self._aux[1] + ---------------|----------------|-------------- + True constant | - | - + False constant | - | - + Quantifier | index of | - + | bound variable | + Relation | 1st argument | 2nd argument + Note: In the current implementation, syntactically identical subexpressions are shared. We have to keep it in mind that this might become source of @@ -33,31 +45,26 @@ class Fog(AbsFo): Attributes: _EDG: string, representing adjacency relation of vertices. _EQ: string, representing equality relation of vertices. + _LT: string, representing less-than relation of vertices. _ATOM_TAGS: tuple of strings, representing types of atom. _BINOP_TAGS: tuple of strings, representing types of binary operator. _UNOP_TAGS: tuple of strings, representing types of uniary operator. _QF_TAGS: tuple of strings, representing types of quantifier. _EXPR_TAGS: tuple of available tags in this class. - _ATOM_BEGIN_POS: beginning position of the atom part of the aux field. - _ATOM_LEN: length of the atom part of the aux field. - _AUX_LEN: length of the aux field. 
""" # Tag-related Variables and Methods _EDG = "edg" _EQ = "=" + _LT = "<" - _ATOM_TAGS = AbsFo._ATOM_TAGS + (_EDG, _EQ) + _ATOM_TAGS = AbsFo._ATOM_TAGS + (_EDG, _EQ, _LT) _BINOP_TAGS = AbsFo._BINOP_TAGS _UNOP_TAGS = AbsFo._UNOP_TAGS _QF_TAGS = AbsFo._QF_TAGS _EXPR_TAGS = _ATOM_TAGS + _BINOP_TAGS + _UNOP_TAGS + _QF_TAGS - _ATOM_BEGIN_POS = AbsFo._AUX_LEN - _ATOM_LEN = 2 - _AUX_LEN = _ATOM_LEN + AbsFo._AUX_LEN - @classmethod def get_edg_tag(cls) -> str: """Gets tag of edg.""" @@ -68,24 +75,36 @@ def get_eq_tag(cls) -> str: """Gets tag of eq.""" return cls._EQ + @classmethod + def get_lt_tag(cls) -> str: + """Gets tag of lt.""" + return cls._LT + + + @staticmethod + def cmp_atom_args(x, y) -> int: + if NameMgr.lookup_name(x) == NameMgr.lookup_name(y): + return 0 + if NameMgr.lookup_name(x) < NameMgr.lookup_name(y): + return -1 + if NameMgr.lookup_name(x) > NameMgr.lookup_name(y): + return 1 + raise Exception() + # Constructor-related Methods @classmethod - def _normalize_aux(cls, aux: tuple) -> tuple: + def _normalize_aux(cls, tag: str, aux: tuple) -> tuple: """Normalize the order of arguments in relation symbol. The order is determined based on the order of names. 
""" - if aux == (): - return (0,) * cls._AUX_LEN - - if ( - len(aux) == 3 - and aux[1] > 0 - and aux[2] > 0 - and NameMgr.lookup_name(aux[1]) > NameMgr.lookup_name(aux[2]) - ): - return (aux[0], aux[2], aux[1]) - + # sort aux if symmetric relation + if tag in [cls.get_edg_tag(), cls.get_eq_tag()]: + return tuple(sorted(aux, key=cmp_to_key(cls.cmp_atom_args))) + # do not sort if non-symmetric relation + if tag == cls.get_lt_tag(): + return aux + assert len(aux) < 2 return aux @classmethod @@ -98,7 +117,7 @@ def edg(cls, x: int, y: int) -> AbsExpr: """ if not (NameMgr.has_name(x) and NameMgr.has_name(y)): raise ValueError("Specified index of edg has no name.") - return cls(cls._EDG, aux=(0, x, y)) + return cls(cls._EDG, aux=(x, y)) @classmethod def eq(cls, x: int, y: int) -> AbsExpr: @@ -110,21 +129,33 @@ def eq(cls, x: int, y: int) -> AbsExpr: """ if not (NameMgr.has_name(x) and NameMgr.has_name(y)): raise ValueError("Specified index of eq has no name.") - return cls(cls._EQ, aux=(0, x, y)) + return cls(cls._EQ, aux=(x, y)) @classmethod - def atom(cls, tag: str, x: int, y: int) -> AbsExpr: - """Gets the tag-specified atomic formula. + def lt(cls, x: int, y: int) -> AbsExpr: + """Gets the atomic formula of the form lt(x,y). Args: - x: index of (variable or constant) symbol (or 0 for true or false) - y: index of (variable or constant) symbol (or 0 for true or false) + x: index of (variable or constant) symbol + y: index of (variable or constant) symbol + """ + if not (NameMgr.has_name(x) and NameMgr.has_name(y)): + raise ValueError("Specified index of lt has no name.") + return cls(cls._LT, aux=(x,y)) + + @classmethod + def atom(cls, tag: str, *args) -> AbsExpr: + """Gets the tag-specified atomic formula. 
+ + Args:\ + args: tuple of indices of (variable or constant) symbols (or 0 for true or false) """ if tag not in cls._ATOM_TAGS: raise ValueError( f"Expression tag, {tag}, is not available in {cls.__name__}" ) - return cls(tag, aux=(0, x, y)) + assert len(args) == 2 + return cls(tag, aux=args) # Instance Methods def get_atom_value(self, i: int) -> int: @@ -133,11 +164,25 @@ def get_atom_value(self, i: int) -> int: Args: i: position of term (1 or 2) """ - if not 0 < i <= type(self)._ATOM_LEN: - raise IndexError( - f"Value index should range from 1 to {type(self)._ATOM_LEN}" - ) - return self._aux[type(self)._ATOM_BEGIN_POS + i - 1] + warn_msg = "`get_atom_value()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.get_atom_arg(i) + + def get_atom_arg(self, i: int) -> int: + """Gets the i-th argument of the atomic formula. + + Args: + i: argument index (1 or 2) + + Returns: + term given as argument of the atom. + """ + if self.is_edg_atom() or self.is_eq_atom() or self.is_lt_atom(): + if i != 1 and i != 2: + raise IndexError("Argument index should be 1 or 2") + else: + raise Exception("Unknown atom") + return self._aux[i-1] def is_edg_atom(self) -> bool: """Is it an atom of the form edg(x,y) ?""" @@ -147,46 +192,45 @@ def is_eq_atom(self) -> bool: """Is it an atom of the form x=y ?""" return self.get_tag() == type(self)._EQ + def is_lt_atom(self) -> bool: + """Is it an atom of the form x None: """Performs NNF computation for this object.""" - if self.is_edg_atom() or self.is_eq_atom(): + if self.is_edg_atom() or self.is_eq_atom() or self.is_lt_atom(): t.append([0, lambda: type(self).neg(self) if negated else self]) return super().compute_nnf_step(negated, s, t) def substitute_step(self, y: int, x: int, assoc: dict) -> None: """Performs substitution for this object.""" - if self.is_edg_atom() or self.is_eq_atom(): - op = [self.get_atom_value(1), self.get_atom_value(2)] - if op[0] == x: - op[0] = y - if op[1] == x: - 
op[1] = y - assoc[id(self)] = type(self).atom(self.get_tag(), op[0], op[1]) + if self.is_edg_atom() or self.is_eq_atom() or self.is_lt_atom(): + op = [self.get_atom_arg(1), self.get_atom_arg(2)] + for i, val in enumerate(op): + if val == x: + op[i] = y + assoc[id(self)] = type(self).atom(self.get_tag(), *op) return super().substitute_step(y, x, assoc) def get_free_vars_and_consts_pre_step( self, bound_vars: list, free_vars: list ) -> None: - """Performs computatoin for this object.""" - if self.is_edg_atom() or self.is_eq_atom(): - v = self.get_atom_value(1) - w = self.get_atom_value(2) - - if v not in bound_vars: - free_vars.append(v) - if w not in bound_vars: - free_vars.append(w) + """Performs computation for this object.""" + if self.is_edg_atom() or self.is_eq_atom() or self.is_lt_atom(): + for v in self._aux: + if v not in bound_vars: + free_vars.append(v) return super().get_free_vars_and_consts_pre_step(bound_vars, free_vars) def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: - """Performs reduce computatoin for this object.""" + """Performs reduce computation for this object.""" if self.is_edg_atom(): - op = [self.get_atom_value(1), self.get_atom_value(2)] + op = [self.get_atom_arg(1), self.get_atom_arg(2)] if op[0] == op[1]: # always false regardless of graphs assoc[id(self)] = type(self).false_const() return @@ -201,7 +245,7 @@ def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: return if self.is_eq_atom(): - op = [self.get_atom_value(1), self.get_atom_value(2)] + op = [self.get_atom_arg(1), self.get_atom_arg(2)] if op[0] == op[1]: # always true regardless of graphs assoc[id(self)] = type(self).true_const() return @@ -215,48 +259,71 @@ def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: assoc[id(self)] = self return + if self.is_lt_atom(): + op = [self.get_atom_arg(1), self.get_atom_arg(2)] + if op[0] == op[1]: + assoc[id(self)] = type(self).false_const() + return + if NameMgr.is_constant(op[0]) and 
NameMgr.is_constant(op[1]): + if st != None: + if st.lt(op[0], op[1]): + assoc[id(self)] = type(self).true_const() + else: + assoc[id(self)] = type(self).false_const() + return + assoc[id(self)] = self + return + super().reduce_formula_step(assoc, st) def perform_boolean_encoding_step(self, assoc: dict, st: BaseRelSt) -> None: """Performs Boolean encoding for this object.""" if self.is_edg_atom(): - atom = [self.get_atom_value(1), self.get_atom_value(2)] + atom = [self.get_atom_arg(1), self.get_atom_arg(2)] if atom[0] == atom[1]: - assoc[id(self)] = st.encode_F() + assoc[id(self)] = st.be_F() else: - assoc[id(self)] = st.encode_edg(atom[0],atom[1]) + assoc[id(self)] = st.be_edg(atom[0],atom[1]) return if self.is_eq_atom(): - atom = [self.get_atom_value(1), self.get_atom_value(2)] + atom = [self.get_atom_arg(1), self.get_atom_arg(2)] if atom[0] == atom[1]: - assoc[id(self)] = st.encode_T() + assoc[id(self)] = st.be_T() else: - assoc[id(self)] = st.encode_eq(atom[0],atom[1]) + assoc[id(self)] = st.be_eq(atom[0],atom[1]) + return + + if self.is_lt_atom(): + atom = [self.get_atom_arg(1), self.get_atom_arg(2)] + if atom[0] == atom[1]: + assoc[id(self)] = st.be_F() + else: + assoc[id(self)] = st.be_lt(atom[0],atom[1]) return if (\ - self.is_land_term()\ - or self.is_lor_term()\ - or self.is_implies_term()\ - or self.is_iff_term()\ + self.is_land()\ + or self.is_lor()\ + or self.is_implies()\ + or self.is_iff()\ ): left = assoc[id(self.get_operand(1))] right = assoc[id(self.get_operand(2))] assoc[id(self)] = Prop.binop(self.get_tag(), left, right) return - if self.is_neg_term(): + if self.is_neg(): op = assoc[id(self.get_operand(1))] assoc[id(self)] = Prop.neg(op) return if self.is_true_atom(): - assoc[id(self)] = st.encode_T() + assoc[id(self)] = st.be_T() return if self.is_false_atom(): - assoc[id(self)] = st.encode_F() + assoc[id(self)] = st.be_F() return assert False @@ -290,10 +357,11 @@ def read(cls, formula_str: str) -> AbsExpr: An atomic formula is defined as 
follows:: - ::= | | + ::= | | | ::= "edg" "(" "," ")" ::= "=" ::= "T" | "F" + ::= "<" ::= | A formula is defined as follows:: @@ -323,28 +391,32 @@ def read(cls, formula_str: str) -> AbsExpr: Args: formula_str: string representation of formula """ + if formula_str.strip() == "": + raise Exception("no formula given") # The following tokens are suppressed in the parsed result. COMMA = pp.Suppress(cls.get_comma_tag()) LPAREN = pp.Suppress(cls.get_lparen_tag()) RPAREN = pp.Suppress(cls.get_rparen_tag()) - # Variables are strings that match the pattern [a-z][a-z0-9_]* . + # Variables are strings that match the pattern [a-z_][a-z0-9_]* . # Constants are strings that match the pattern [A-Z][A-Z0-9_]* . VAR_SYMB = pp.Word(pp.srange("[a-z]"), pp.srange("[a-z0-9_]")) CON_SYMB = pp.Word(pp.srange("[A-Z]"), pp.srange("[A-Z0-9_]")) TERM = VAR_SYMB | CON_SYMB EDG_SYMB = pp.Literal(cls.get_edg_tag()) - EQ_SYMB = pp.Literal(cls.get_eq_tag()) + EQ_SYMB = pp.Literal(cls.get_eq_tag()) + LT_SYMB = pp.Literal(cls.get_lt_tag()) EDG_REL = EDG_SYMB + LPAREN + TERM + COMMA + TERM + RPAREN - EQ_REL = TERM + EQ_SYMB + TERM + EQ_REL = TERM + EQ_SYMB + TERM + LT_REL = TERM + LT_SYMB + TERM - TRUE = pp.Literal(cls.get_true_const_tag()) + TRUE = pp.Literal(cls.get_true_const_tag()) FALSE = pp.Literal(cls.get_false_const_tag()) CON_REL = TRUE | FALSE - ATOM_EXPR = EDG_REL | EQ_REL | CON_REL + ATOM_EXPR = EDG_REL | LT_REL | EQ_REL | CON_REL UNOP = pp.Literal(cls.get_neg_tag()) @@ -391,6 +463,15 @@ def action_eq_rel(string, location, tokens): return cls.eq(op1, op2) assert False + @LT_REL.set_parse_action + def action_lt_rel(string, location, tokens): + assert len(tokens) == 3, f"{tokens}" + if tokens[1] == cls.get_lt_tag(): + op1 = int(tokens[0]) + op2 = int(tokens[2]) + return cls.lt(op1, op2) + assert False + @CON_REL.set_parse_action def action_con_rel(string, location, tokens): assert len(tokens) == 1, f"{tokens}" @@ -456,15 +537,20 @@ def action_qfop(string, location, tokens): def 
make_str_pre_step(self) -> str: if self.is_edg_atom(): - name1 = NameMgr.lookup_name(self.get_atom_value(1)) - name2 = NameMgr.lookup_name(self.get_atom_value(2)) + name1 = NameMgr.lookup_name(self.get_atom_arg(1)) + name2 = NameMgr.lookup_name(self.get_atom_arg(2)) return f"edg({name1}, {name2})" if self.is_eq_atom(): - name1 = NameMgr.lookup_name(self.get_atom_value(1)) - name2 = NameMgr.lookup_name(self.get_atom_value(2)) + name1 = NameMgr.lookup_name(self.get_atom_arg(1)) + name2 = NameMgr.lookup_name(self.get_atom_arg(2)) return f"{name1} = {name2}" + if self.is_lt_atom(): + name1 = NameMgr.lookup_name(self.get_atom_arg(1)) + name2 = NameMgr.lookup_name(self.get_atom_arg(2)) + return f"{name1} < {name2}" + return super().make_str_pre_step() def make_str_in_step(self) -> str: @@ -475,6 +561,9 @@ def make_str_in_step(self) -> str: if self.is_eq_atom(): return "" + if self.is_lt_atom(): + return "" + return super().make_str_in_step() def make_str_post_step(self) -> str: @@ -485,23 +574,21 @@ def make_str_post_step(self) -> str: if self.is_eq_atom(): return "" + if self.is_lt_atom(): + return "" + return super().make_str_post_step() def make_node_str_step(self) -> str: - if self.is_edg_atom() or self.is_eq_atom(): - - op1 = ( - NameMgr.lookup_name(self.get_atom_value(1)) - if self.get_atom_value(1) != 0 - else "-" - ) - op2 = ( - NameMgr.lookup_name(self.get_atom_value(2)) - if self.get_atom_value(2) != 0 - else "-" - ) - - return f'"edg({op1},{op2})"' if self.is_edg_atom() else f'"{op1}={op2}"' - + if self.is_edg_atom() or self.is_eq_atom() or self.is_lt_atom(): + op = [] + for val in self._aux: + op.append(NameMgr.lookup_name(val) if val != 0 else "-") + if self.is_edg_atom(): + return f'"edg({op[0]},{op[1]})"' + if self.is_eq_atom(): + return f'"{op[0]}={op[1]}"' + if self.is_lt_atom(): + return f'"{op[0]}<{op[1]}"' + return super().make_node_str_step() diff --git a/pygplib/grst.py b/pygplib/grst.py index cc572e3..dda7fa4 100644 --- a/pygplib/grst.py +++ 
b/pygplib/grst.py @@ -2,10 +2,15 @@ from typing import overload +from simpgraph import SimpGraph + from .name import NameMgr from .symrelst import SymRelSt from .prop import Prop +from .fog import Fog from .ecc import Ecc +from . import constraints +from . import op class GrSt(SymRelSt): """Manages graph structure for interpreting formulas (and index-mapping). @@ -37,65 +42,163 @@ class GrSt(SymRelSt): """ _EDGE_ENC = "edge" _CLIQUE_ENC = "clique" + _VERTEX_ENC = "vertex" _DIRECT_ENC = "direct" _LOG_ENC = "log" - _ENCODING = (_EDGE_ENC, _CLIQUE_ENC, _DIRECT_ENC, _LOG_ENC,) + _ENCODING = (_EDGE_ENC, _CLIQUE_ENC, _VERTEX_ENC, _DIRECT_ENC, _LOG_ENC,) def __init__(self, vertex_list: list, edge_list: list, \ - encoding: str = "edge", prefix: str = "V"): + encoding: str = "edge", prefix: str = "V", ecc = None,\ + msg: str = ""): """Initialize a graph structure. Args: vertex_list: list of distinct vertices in an arbitrary order edge_list: list of distinct edges in an arbitrary order - encoding: type of encoding ("edge", "clique", "direct", "log") + encoding: type of encoding ("edge", "clique", "vertex", "direct", "log") prefix: prefix of vertex name, which must be uppercase. 
+ ecc: edge clique cover for clique encoding + msg: message printed when assertion failed """ + if len(vertex_list) == 0: + raise Exception(f"no vertex given: "+msg) for edge in edge_list: if len(edge) != 2: - raise Exception(f"edge must be of size 2: {edge}") + raise Exception(f"edge must be of size 2: "+msg) if edge[0] == edge[1]: - raise Exception(f"loop is not allowed: {edge}") + raise Exception(f"loop is not allowed: "+msg) if edge[0] not in vertex_list\ or edge[1] not in vertex_list: - raise Exception(f"invalid vertex found: {edge}") + raise Exception(f"invalid vertex found: "+msg) if encoding not in type(self)._ENCODING: - raise Exception(f"unsupported encoding type: {encoding}") + raise Exception(f"unsupported encoding type: "+msg) if not (prefix.isalpha() and prefix.isupper()): raise Exception(\ - "All characters must be alphabetic and uppercase letters") - + "All characters in prefix must be alphabetic and uppercase letters: "+msg) + + self._relation_dict = { + type(self)._EDGE_ENC: self._compute_relation_edge_enc, + type(self)._CLIQUE_ENC:self._compute_relation_clique_enc, + type(self)._VERTEX_ENC:self._compute_relation_vertex_enc, + type(self)._DIRECT_ENC:self._compute_relation_direct_enc, + type(self)._LOG_ENC: self._compute_relation_log_enc, + } + self._be_eq_dict = { + type(self)._EDGE_ENC: self._be_eq_arbit_enc, + type(self)._CLIQUE_ENC:self._be_eq_arbit_enc, + type(self)._VERTEX_ENC:self._be_eq_arbit_enc, + type(self)._DIRECT_ENC:self._be_eq_arbit_enc, + type(self)._LOG_ENC: self._be_eq_arbit_enc, + } + self._be_edg_dict = { + type(self)._EDGE_ENC: self._be_edg_edge_enc, + type(self)._CLIQUE_ENC:self._be_edg_edge_enc, + type(self)._VERTEX_ENC:self._be_edg_vertex_enc, + type(self)._DIRECT_ENC:self._be_edg_direct_enc, + type(self)._LOG_ENC: self._be_edg_arbit_enc, + } + self._be_domain_dict = { + type(self)._EDGE_ENC: self._be_domain_arbit_enc, + type(self)._CLIQUE_ENC:self._be_domain_arbit_enc, + type(self)._VERTEX_ENC:self._be_domain_vertex_enc, + 
type(self)._DIRECT_ENC:self._be_domain_direct_enc, + type(self)._LOG_ENC: self._be_domain_log_enc, + } + self._be_aux_const_dict = { + type(self)._EDGE_ENC: self._compute_auxiliary_constraint_edge_enc, + type(self)._CLIQUE_ENC:self._compute_auxiliary_constraint_clique_enc, + type(self)._VERTEX_ENC:self._compute_auxiliary_constraint_vertex_enc, + type(self)._DIRECT_ENC:self._compute_auxiliary_constraint_direct_enc, + type(self)._LOG_ENC: self._compute_auxiliary_constraint_log_enc, + } + + self._ecc = ecc + self._domain_enc_dict = {} + """dictionary of auxiliary Boolean variables for the domain constraint""" + self._domain_dec_dict = {} + """Inverse mapping of _domain_enc_dict""" + self._edg_enc_dict = {} + """dictionary of auxiliary Boolean variables for the edg relation""" + self._edg_dec_dict = {} + """Inverse mapping of _edg_enc_dict""" + self._le_enc_dict = {} + """dictionary of auxiliary Boolean variables for the order relation""" + self._le_dec_dict = {} + """Inverse mapping of _le_enc_dict""" self._encoding = encoding """encoding type""" self._prefix = prefix """prefix of vertex name""" + self._vertex_to_index_dict = {} + self._index_to_vertex_dict = {} for vertex in vertex_list: - NameMgr.lookup_index(self._prefix + f"{vertex}") + i = NameMgr.lookup_index(self._prefix + f"{vertex}") + self._vertex_to_index_dict[vertex] = i + self._index_to_vertex_dict[i] = vertex self._verts = tuple(vertex_list) """vertices""" self._edges = tuple([tuple(sorted(edge)) for edge in edge_list]) """edges""" if len(set(self._verts)) != len(vertex_list): - raise Exception(f"duplicate vertex found: {vertex_list}") + raise Exception(f"duplicate vertex found: "+msg) if len(set(self._edges)) != len(edge_list): - raise Exception(f"duplicate edge found: {edge_list}") + raise Exception(f"duplicate edge found: "+msg) # vertex_to_object() and object_to_vertex() are now available! 
- objects = self.vertex_to_object(self._verts) - if self._encoding == type(self)._EDGE_ENC: - relation = self.vertex_to_object(self._edges) - elif self._encoding == type(self)._CLIQUE_ENC: - ecc = Ecc(self._verts, self._edges).compute_separating_ecc() - relation = self.vertex_to_object(ecc) - elif self._encoding == type(self)._DIRECT_ENC: - relation = self.vertex_to_object(tuple([(v,) for v in self._verts])) - elif self._encoding == type(self)._LOG_ENC: - relation = self.vertex_to_object(self._compute_log_relation()) + relation = self._relation_dict[self._encoding]() + super().__init__(objects, relation,msg=msg) + assert self.code_length > 0 + self._G = SimpGraph() + for v in objects: + self._G.add_vertex(v) + for v,w in self.vertex_to_object(self._edges): + self._G.add_edge(v,w) + if len(objects) > 0: + self.max_v = objects[0] + for v in objects[1:]: + if self.lt(self.max_v, v): + self.max_v = v + + def _out_neighborhood(self, v: int): + assert v in self.domain + return tuple([w for w in self.domain + if self._object_to_pos(w)+1 in self.get_code(v) and v != w]) + + def _in_neighborhood(self, v: int): + assert v in self.domain + return tuple([w for w in self.domain + if self._object_to_pos(v)+1 in self.get_code(w) and v != w]) + + def _compute_relation_edge_enc(self): + return self.vertex_to_object(self._edges) + + def _compute_relation_clique_enc(self): + if self._ecc is None: + return self.vertex_to_object( + Ecc(self._verts, self._edges).compute_separating_ecc()) else: - raise Exception(f"invalid encoding type: {self._encoding}") - super().__init__(objects, relation) + return self.vertex_to_object(self._ecc) + + def _compute_relation_vertex_enc(self): + pos_dict = {} + in_neigh_dict = {} + for i,v in enumerate(self._verts): + pos_dict[v] = i + in_neigh_dict[v] = {v} + for v,w in self._edges: + if pos_dict[v] < pos_dict[w]: + v,w = w,v + assert pos_dict[v] > pos_dict[w] + in_neigh_dict[v].add(w) + res = [] + for v in self._verts: + 
res.append(tuple(sorted(in_neigh_dict[v]))) + return self.vertex_to_object(tuple(res)) + + def _compute_relation_direct_enc(self): + return self.vertex_to_object(tuple([(v,) for v in self._verts])) - def _compute_log_relation(self) -> tuple: + def _compute_relation_log_enc(self): """Computes relation for log-encoding. Computes relation for log-encoding, where each relation instance means @@ -114,7 +217,7 @@ def _compute_log_relation(self) -> tuple: for j in range(n*i, min(n*(i+1),N))]\ )) n *= 2 - return tuple(res) + return self.vertex_to_object(tuple(res)) def vertex_to_object_int(self, vertex: int) -> int: """Converts vertex to object (constant symbol index). @@ -126,11 +229,7 @@ def vertex_to_object_int(self, vertex: int) -> int: """ if vertex not in self._verts: raise Exception(f"invalid vertex: {vertex}") - name = self._prefix + f"{vertex}" - if not NameMgr.has_index(name): - raise Exception(f"vertex: {vertex} not registered to NameMgr.") - obj = NameMgr.lookup_index(name) - return obj + return self._vertex_to_index_dict[vertex] def vertex_to_object_tuple(self, tup: tuple[int]) -> tuple[int]: """Converts vertex to object (constant symbol index). @@ -174,227 +273,426 @@ def object_to_vertex(self, obj: int) -> int: Returns: index of vertex. """ - name = NameMgr.lookup_name(obj) - if not (name.find(self._prefix) == 0 \ - and name[len(self._prefix) :].isdigit()): - raise Exception(f"{name} is not vertex name") + return self._index_to_vertex_dict[obj] - return int(name[len(self._prefix) :]) - - def adjacent(self, i: int, j: int) -> bool: + def adjacent(self, x: int, y: int) -> bool: """Determines if constants (meaning vertices) are adjacent. Args: - i: constant symbol index (meaning vertex) - j: constant symbol index (meaning vertex) + x: constant symbol index (meaning vertex) + y: constant symbol index (meaning vertex) Returns: True if adjacent, and False otherwise. 
""" - if i not in self.domain: - raise Exception(f"{i} is not a domain object.") - if j not in self.domain: - raise Exception(f"{j} is not a domain object.") - u = self.object_to_vertex(i) - v = self.object_to_vertex(j) - return tuple(sorted([u,v])) in self._edges - - def equal(self, i: int, j: int) -> bool: + if x not in self.domain: + raise Exception(f"{x} is not a domain object.") + if y not in self.domain: + raise Exception(f"{y} is not a domain object.") + return x in self._G.adj_vertices(y) + + def equal(self, x: int, y: int) -> bool: """Determines if constants (meaning vertices) are equal with each other. Args: - i: constant symbol index (meaning vertex) - j: constant symbol index (meaning vertex) + x: constant symbol index (meaning vertex) + y: constant symbol index (meaning vertex) Returns: True if equal, and False otherwise. """ - if i not in self.domain: - raise Exception(f"{i} is not a domain object.") - if j not in self.domain: - raise Exception(f"{j} is not a domain object.") - return i == j + if x not in self.domain: + raise Exception(f"{x} is not a domain object.") + if y not in self.domain: + raise Exception(f"{y} is not a domain object.") + return x == y + + def lt(self, x: int, y: int) -> bool: + """Determines if x is less than y in an internal order of vertices. - def _get_lit_list(self, index: int) -> list[Prop]: + Args: + x: constant symbol index (meaning vertex) + y: constant symbol index (meaning vertex) + Returns: + True if x is less than y, and False otherwise. 
+ """ + px = self._get_lit_list(x) + py = self._get_lit_list(y) + for i in range(self.code_length): + i = self.code_length-i-1 + assert px[i] in [Prop.true_const(), Prop.false_const()] + assert py[i] in [Prop.true_const(), Prop.false_const()] + if px[i] == py[i]: + continue + if px[i] == Prop.false_const() and py[i] == Prop.true_const(): + return True + if px[i] == Prop.true_const() and py[i] == Prop.false_const(): + return False + return False + + def sorted(self, li: list) -> None: + """Sorts list of constants (i.e. vertices) in an increasing order. + + Args: + li: list of constant indices + + Returns: + Sorted list + """ + res = list(li) + n = len(res) + # selection sort based on the order defined by self.lt + for i in range(n): + minpos = i + minval = res[i] + for j in range(i+1,n): + if self.lt(res[j],minval): + minpos = j + minval = res[j] + tmp = res[i] + res[i] = minval + res[minpos] = tmp + # check sorted + for i in range(n-1): + assert self.lt(res[i],res[i+1]) or self.equal(res[i],res[i+1]) + return res + + def _get_lit_list(self, x: int) -> list[Prop]: """Gets list of literals, given a symbol index. Args: - index: symbol index (constant symbol or variable symbol) + x: symbol index (constant symbol or variable symbol) Returns: list of literals (Boolean variables' objects of Prop class) """ - if index in self.domain: - return [Prop.true_const() if pos+1 in self.get_code(index) \ + if x in self.domain: + return [Prop.true_const() if i+1 in self.get_code(x) \ else Prop.false_const() \ - for pos in range(self.code_length)] + for i in range(self.code_length)] else: - return [Prop.var(j) for j in self.get_boolean_var_list(index)] + return [Prop.var(p) for p in self.get_boolean_var_list(x)] - def encode_eq(self, i: int, j: int) -> Prop: + def encode_eq(self, x: int, y: int) -> Prop: """Encodes predicate of equality relation, given two symbols. 
Args: - i: symbol index (constant symbol or variable symbol) - j: symbol index (constant symbol or variable symbol) + x: symbol index (constant symbol or variable symbol) + y: symbol index (constant symbol or variable symbol) Returns: formula object of Prop class """ - li = Prop.bitwise_binop(Prop.get_iff_tag(), \ - self._get_lit_list(i), \ - self._get_lit_list(j)) + warn_msg = "`encode_eq()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.be_eq(x,y) + + def be_eq(self, x: int, y: int) -> Prop: + """Encodes predicate of equality relation, given two symbols. + + Args: + x: symbol index (constant symbol or variable symbol) + y: symbol index (constant symbol or variable symbol) + Returns: + formula object of Prop class + """ + return self._be_eq_dict[self._encoding](x,y) + + def _be_eq_arbit_enc(self, x: int, y: int) -> Prop: + px = self._get_lit_list(x) + py = self._get_lit_list(y) + li = [Prop.iff(px[i],py[i]) for i in range(self.code_length)] return Prop.binop_batch(Prop.get_land_tag(), li) - def encode_edg(self, i: int, j: int) -> Prop: + def encode_edg(self, x: int, y: int) -> Prop: """Encodes predicate of adjacency relation, given two symbols. 
Args: - i: symbol index (constant symbol or variable symbol) - j: symbol index (constant symbol or variable symbol) + x: symbol index (constant symbol or variable symbol) + y: symbol index (constant symbol or variable symbol) Returns: formula object of Prop class """ - res = None - if self._encoding in (type(self)._EDGE_ENC, type(self)._CLIQUE_ENC): - li = Prop.bitwise_binop(Prop.get_land_tag(), \ - self._get_lit_list(i), \ - self._get_lit_list(j)) - res = Prop.binop_batch(Prop.get_lor_tag(), li) - res = Prop.land(res, Prop.neg(self.encode_eq(i,j))) - elif self._encoding == type(self)._DIRECT_ENC: - lit_list = [self._get_lit_list(i),self._get_lit_list(j)] - or_li = [Prop.lor(\ - Prop.land(\ - lit_list[0][self._object_to_pos(edge[0])],\ - lit_list[1][self._object_to_pos(edge[1])]),\ - Prop.land(\ - lit_list[0][self._object_to_pos(edge[1])],\ - lit_list[1][self._object_to_pos(edge[0])]),\ - )\ - for edge in self.vertex_to_object(self._edges)] - res = Prop.binop_batch(Prop.get_lor_tag(), or_li) - elif self._encoding == type(self)._LOG_ENC: - or_li = [] - for edge in self.vertex_to_object(self._edges): - li = Prop.bitwise_binop(Prop.get_iff_tag(), \ - self._get_lit_list(i), \ - self._get_lit_list(edge[0])) - li += Prop.bitwise_binop(Prop.get_iff_tag(), \ - self._get_lit_list(j), \ - self._get_lit_list(edge[1])) - res0 = Prop.binop_batch(Prop.get_land_tag(), li) - li = Prop.bitwise_binop(Prop.get_iff_tag(), \ - self._get_lit_list(i), \ - self._get_lit_list(edge[1])) - li += Prop.bitwise_binop(Prop.get_iff_tag(), \ - self._get_lit_list(j), \ - self._get_lit_list(edge[0])) - res1 = Prop.binop_batch(Prop.get_land_tag(), li) - or_li.append(Prop.lor(res0,res1)) - res = Prop.binop_batch(Prop.get_lor_tag(), or_li) + warn_msg = "`encode_edg()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.be_edg(x,y) + + def be_edg(self, x: int, y: int) -> Prop: + """Encodes predicate of adjacency relation, given two symbols. 
+ + Args: + x: symbol index (constant symbol or variable symbol) + y: symbol index (constant symbol or variable symbol) + Returns: + formula object of Prop class + """ + return self._be_edg_dict[self._encoding](x,y) + + def _be_edg_edge_enc(self, x: int, y: int) -> Prop: + px = self._get_lit_list(x) + py = self._get_lit_list(y) + li = [Prop.land(px[i],py[i]) for i in range(self.code_length)] + return Prop.land(Prop.neg(self.be_eq(x,y)), + Prop.binop_batch(Prop.get_lor_tag(), li)) + + def _be_edg_direct_enc(self, x: int, y: int) -> Prop: + px = self._get_lit_list(x) + py = self._get_lit_list(y) + li = [Prop.lor( + Prop.land( + px[self._object_to_pos(v)], + py[self._object_to_pos(w)]), + Prop.land( + px[self._object_to_pos(w)], + py[self._object_to_pos(v)])) + for v,w in self._G.all_edges()] + if len(li) == 0: + return Prop.false_const() else: - raise Exception(f"NotImplementedError") - return res + return Prop.land(Prop.neg(self.be_eq(x,y)), + Prop.binop_batch(Prop.get_lor_tag(), li)) + + def _be_edg_arbit_enc(self, x: int, y: int) -> Prop: + li = [Prop.lor( + Prop.land( + self.be_eq(x,v), + self.be_eq(y,w)), + Prop.land( + self.be_eq(x,w), + self.be_eq(y,v))) + for v,w in self._G.all_edges()] + if len(li) == 0: + return Prop.false_const() + else: + return Prop.land(Prop.neg(self.be_eq(x,y)), + Prop.binop_batch(Prop.get_lor_tag(), li)) + + def _be_edg_vertex_enc(self, x: int, y: int) -> Prop: + def _aux_index(x: int, i: int) -> int: + if (x,i) not in self._edg_enc_dict: + v = NameMgr.get_aux_index() + assert v not in self._edg_dec_dict + self._edg_enc_dict[(x,i)] = v + self._edg_dec_dict[v] = (x,i) + return self._edg_enc_dict[(x,i)] + px = self._get_lit_list(x) + py = self._get_lit_list(y) + sx = [Prop.var(_aux_index(x,i)) + for i in range(self.code_length)] + sy = [Prop.var(_aux_index(y,i)) + for i in range(self.code_length)] + sx.append(Prop.false_const()) + sy.append(Prop.false_const()) + li = [ + Prop.binop_batch(Prop.get_land_tag(), + 
[px[i],py[i],Prop.lor(Prop.neg(sx[i-1]),Prop.neg(sy[i-1]))]) + for i in range(self.code_length)] + return Prop.land(Prop.neg(self.be_eq(x,y)), + Prop.binop_batch(Prop.get_lor_tag(), li)) + + def be_lt(self, x: int, y: int) -> Prop: + """Encodes predicate of less-than relation, given two symbols. + + Args: + x: symbol index (constant symbol or variable symbol) + y: symbol index (constant symbol or variable symbol) + Returns: + formula object of Prop class + """ + def _aux_index(x: int, y: int, i: int) -> int: + if (x,y,i) not in self._le_enc_dict: + v = NameMgr.get_aux_index() + assert v not in self._le_dec_dict + self._le_enc_dict[(x,y,i)] = v + self._le_dec_dict[v] = (x,y,i) + return self._le_enc_dict[(x,y,i)] + px = self._get_lit_list(x) + py = self._get_lit_list(y) + sx = [Prop.var(_aux_index(x,y,i)) + for i in range(self.code_length)] + sx.append(Prop.false_const()) + li = [ + Prop.land( + Prop.implies( + Prop.neg(sx[i]), + Prop.iff(px[i],py[i])), + Prop.iff( + sx[i], + Prop.lor( + sx[i+1], + Prop.land(Prop.neg(px[i]),py[i])))) + for i in range(self.code_length)] + li.append(Prop.iff(sx[0],Prop.true_const())) + return Prop.binop_batch(Prop.get_land_tag(), li) def encode_T(self) -> Prop: + """Encodes true constant object of Fog class to Prop object.""" + warn_msg = "`encode_T()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.be_T() + + def be_T(self) -> Prop: """Encodes true constant object of Fog class to Prop object.""" return Prop.true_const() def encode_F(self) -> Prop: + """Encodes false constant object of Fog class to Prop object.""" + warn_msg = "`encode_F()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return self.be_F() + + def be_F(self) -> Prop: """Encodes false constant object of Fog class to Prop object.""" return Prop.false_const() - def compute_domain_constraint(self, var: int) -> Prop: - """Computes domain constraint for a first-order variable. 
+ def compute_auxiliary_constraint(self, f: Fog) -> Prop: + """Auxiliary constraint encoder Args: - var: index of a first-order variable symbol + f: a first-order formula object of Fog class Returns: formula object of Prop class """ - if not NameMgr.is_variable(var): - raise Exception(f"symbol index {var} is not a variable symbol") - res = None - if self._encoding in (type(self)._EDGE_ENC, type(self)._CLIQUE_ENC): - res = self._compute_domain_constraint_DNF(var) - elif self._encoding == type(self)._DIRECT_ENC: - res = self._compute_domain_constraint_direct_encoding(var) - elif self._encoding == type(self)._LOG_ENC: - res = self._compute_domain_constraint_log_encoding(var) - else: - raise Exception(f"NotImplementedError") - return res + return self._be_aux_const_dict[self._encoding](f) + + def _compute_auxiliary_constraint_direct_enc(self, f:Fog) -> Prop: + return Prop.true_const() + + def _compute_auxiliary_constraint_log_enc(self, f:Fog) -> Prop: + return Prop.true_const() + + def _compute_auxiliary_constraint_edge_enc(self, f:Fog) -> Prop: + return Prop.true_const() + + def _compute_auxiliary_constraint_clique_enc(self, f:Fog) -> Prop: + return Prop.true_const() - def _compute_domain_constraint_DNF(self, index: int)\ - -> Prop: - """Computes a constraint in DNF for a given first-order variable - such that the variable runs over the domain of this structure. + def _compute_auxiliary_constraint_vertex_enc(self, f:Fog) -> Prop: + def _aux_index(x: int, i: int) -> int: + if (x,i) not in self._edg_enc_dict: + v = NameMgr.get_aux_index() + assert v not in self._edg_dec_dict + self._edg_enc_dict[(x,i)] = v + self._edg_dec_dict[v] = (x,i) + return self._edg_enc_dict[(x,i)] + # find all edg(x,y) occurring in f, wherre x and y are free variables + # or constants. 
+ edg_set = set() + for i, g in op.generate_subformulas(f): + if i == 0: + if g.is_edg_atom(): + # edg(x,y) + x = g.get_atom_arg(1) + y = g.get_atom_arg(2) + edg_set.add((x,y)) + # compute all auxiliary constraints. + li = [] + for x,y in edg_set: + px = self._get_lit_list(x) + py = self._get_lit_list(y) + sx = [Prop.var(_aux_index(x,i)) + for i in range(self.code_length)] + sy = [Prop.var(_aux_index(y,i)) + for i in range(self.code_length)] + sx.append(Prop.false_const()) + sy.append(Prop.false_const()) + li += [ + Prop.iff( + sx[i], + Prop.lor(sx[i-1],Prop.land(Prop.neg(sx[i-1]),px[i]))) + for i in range(self.code_length)] + li += [ + Prop.iff( + sy[i], + Prop.lor(sy[i-1],Prop.land(Prop.neg(sy[i-1]),py[i]))) + for i in range(self.code_length)] + return Prop.binop_batch(Prop.get_land_tag(), li) + + def compute_domain_constraint(self, x: int) -> Prop: + """Domain constraint encoder Args: - index: a first-order variable index + x: index of a first-order variable symbol + Returns: + formula object of Prop class + """ + return self._be_domain_dict[self._encoding](x) + def _be_domain_arbit_enc(self, x: int) -> Prop: + """Domain constraint encoder for arbitrary encoding. + + Args: + x: index of a first-order variable symbol Returns: - Prop: Prop class object of the computed DNF constraint. + formula object of Prop class """ - dnf = [] - for obj in self.domain: - lits = self._get_lit_list(index) - term = [ - lits[pos] if pos+1 in self.get_code(obj) \ - else Prop.neg(lits[pos]) \ - for pos in range(self.code_length) - ] - dnf.append(Prop.binop_batch(Prop.get_land_tag(), term)) - return Prop.binop_batch(Prop.get_lor_tag(), dnf) - - def _compute_domain_constraint_direct_encoding(self, index: int) -> Prop: - """Computes domain constraint for a first-order variable - (direct-encoding version). 
+ li = [self.be_eq(x,v) for v in self.domain] + return Prop.binop_batch(Prop.get_lor_tag(), li) + + def _be_domain_direct_enc(self, x: int) -> Prop: + """Domain constraint encoder for direct-encoding. Args: - var: index of a first-order variable symbol + x: index of a first-order variable symbol Returns: formula object of Prop class """ - if self._encoding != type(self)._DIRECT_ENC: - raise Exception(\ - f"encoding type {self._encoding} does not match with this method") - - lits = self._get_lit_list(index) - # at least one constraint - at_least_one = Prop.binop_batch(Prop.get_lor_tag(), lits) - # at most one constraint - term = [Prop.neg(Prop.land(lits[i],lits[j])) \ - for i in range(len(lits))\ - for j in range(i+1,len(lits))] - at_most_one = Prop.binop_batch(Prop.get_land_tag(), term) - return Prop.land(at_most_one, at_least_one) - - def _compute_domain_constraint_log_encoding(self, index: int) -> Prop: - """Computes domain constraint for a first-order variable - (log-encoding version). + li = self.get_boolean_var_list(x) + return Prop.land( + Prop.binop_batch(Prop.get_lor_tag(), list(map(Prop.var,li))), + Prop.read(constraints.at_most_r(li, r=1))) + + def _be_domain_log_enc(self, x: int) -> Prop: + """Domain constraint encoder for log-encoding. 
Args: - var: index of a first-order variable symbol + x: index of a first-order variable symbol Returns: formula object of Prop class """ - if self._encoding != type(self)._LOG_ENC: - raise Exception(\ - f"encoding type {self._encoding} does not match with this method") - smaller_li = [Prop.false_const()] - greater_li = [Prop.true_const()] - - smaller_li += self._get_lit_list(index) - greater_li += self._get_lit_list(\ - self.vertex_to_object(self._verts[-1])) - - acc = [] - while len(smaller_li) > 0: - if greater_li[0] == Prop.true_const(): - li = [Prop.neg(smaller_li[0])] - li += Prop.bitwise_binop(Prop.get_iff_tag(),\ - smaller_li[1:],\ - greater_li[1:]) - acc.append(Prop.binop_batch(Prop.get_land_tag(), li)) - smaller_li = smaller_li[1:] - greater_li = greater_li[1:] - res = Prop.binop_batch(Prop.get_lor_tag(), acc) - return res + return Prop.lor(self.be_lt(x, self.max_v), self.be_eq(x, self.max_v)) + + def _be_domain_vertex_enc(self, x: int) -> Prop: + """Domain constraint encoder for vertex-encoding. 
+ + Args: + x: index of a first-order variable symbol + Returns: + formula object of Prop class + """ + def _aux_index(x: int, i: int) -> int: + if (x,i) not in self._domain_enc_dict: + v = NameMgr.get_aux_index() + assert v not in self._domain_dec_dict + self._domain_enc_dict[(x,i)] = v + self._domain_dec_dict[v] = (x,i) + return self._domain_enc_dict[(x,i)] + def _conjunction_out_neighborhood(v: int, px: list) -> Prop: + N = self._out_neighborhood(v) + if len(N) == 0: + return Prop.true_const() + else: + return Prop.binop_batch(Prop.get_land_tag(), + [px[self._object_to_pos(w)] for w in N]) + def _disjunction_in_neighborhood(v: int, px: list, sx: list) -> Prop: + N = self._in_neighborhood(v) + if len(N) == 0: + return Prop.false_const() + else: + return Prop.binop_batch(Prop.get_lor_tag(), + [Prop.land(px[self._object_to_pos(w)], + sx[self._object_to_pos(w)]) for w in N]) + px = self._get_lit_list(x) + sx = [Prop.var(_aux_index(x,i)) + for i in range(self.code_length)] + res = Prop.read(constraints.at_most_r( + [_aux_index(x,i) for i in range(self.code_length)], r=1)) + li = [Prop.binop_batch(Prop.get_lor_tag(), sx), res] + li += [Prop.implies(sx[i],px[i]) for i in range(self.code_length)] + li += [Prop.implies( + Prop.land(px[self._object_to_pos(v)], + sx[self._object_to_pos(v)]), + _conjunction_out_neighborhood(v,px)) + for v in self._G.all_vertices()] + li += [Prop.implies( + Prop.land(px[self._object_to_pos(v)], + Prop.neg(sx[self._object_to_pos(v)])), + _disjunction_in_neighborhood(v,px,sx)) + for v in self._G.all_vertices()] + return Prop.binop_batch(Prop.get_land_tag(), li) diff --git a/pygplib/name.py b/pygplib/name.py index 4fa9798..5c09ffd 100644 --- a/pygplib/name.py +++ b/pygplib/name.py @@ -39,6 +39,10 @@ def lookup_index(cls, name: str) -> int: Note: If not exists, a new index will be assigned. + The method fails if names of auxiliary variables introduced by + get_aux_index are given. 
+ If the leading character is "_" and the name is already registered, + then this means that it is the index of an auxiliary variable. Args: name: name of (variable or constant) symbol @@ -46,6 +50,8 @@ def lookup_index(cls, name: str) -> int: Raises: ValueError: if the leading character is not alphabetic. """ + if name[:1] == "_" and name in cls._dict: + return cls._dict[name] if not name[:1].isalpha(): raise ValueError("Leading character must be alphabetic.") if name not in cls._dict: @@ -53,6 +59,18 @@ def lookup_index(cls, name: str) -> int: cls._dict[name] = len(cls._inv_list) return cls._dict[name] + + @classmethod + def get_aux_index(cls) -> int: + """Gets the index of a new auxiliary variable to introduce. + + The leading character of variable name is "_" to avoid collision. + """ + name = f"_{len(cls._inv_list)+1}" + cls._inv_list.append(name) + cls._dict[name] = len(cls._inv_list) + return cls._dict[name] + @classmethod def lookup_name(cls, index: int) -> str: """Look-up name from index. @@ -98,7 +116,7 @@ def is_variable(cls, index: int) -> bool: if not cls.has_name(index): raise ValueError(f"No name is linked to {index}") name = cls.lookup_name(index) - return name[:1].islower() + return name[:1].islower() or name[:1] == "_" @classmethod def is_constant(cls, index: int) -> bool: diff --git a/pygplib/op.py b/pygplib/op.py index f719575..fb13f9d 100644 --- a/pygplib/op.py +++ b/pygplib/op.py @@ -2,6 +2,7 @@ import sys import warnings +from typing import Iterable from .absexpr import AbsExpr from .absexpr import IndexGen @@ -20,7 +21,38 @@ def generator(f: AbsExpr, skip_shared: bool = False): occurences of each such subformula are ignored. Note: - It would be safe to call generator with skip_shared False for + It would be safe to call with skip_shared False for + first-order formulas. Consider, for instance, (? [x] : x) & x. + Since the second and the third occurrences of x are shared, + the third occurence of x is skipped if skip_shared is enabled. 
+ However, there are cases for which the third one should be treated + separately from the second one. + For instance, consider computing all free variables from the above + formula. If the third occurrence of x is skipped, no free variable + will be mistakenly output. + + Args: + f: formula to be traversed + skip_shared: indicates whether shared formulas are skipped. + + Yields: + (i, subexpr): when subexpr appears in prefix order (for i=0), + in infix order (for i=1), and in postfix order (for i=2). + """ + warn_msg = "`generator()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) + return generate_subformulas(f, skip_shared=skip_shared) + +def generate_subformulas(f: AbsExpr, skip_shared: bool = False): + """Yields all subformulas with left subformula first. + + If skip_shared is set True, shared subformulas are + skipped, that is, if there are multiple occurrences of + a syntactically identical subformula, the second or later + occurrences of each such subformula are ignored. + + Note: + It would be safe to call with skip_shared False for + first-order formulas. Consider, for instance, (? [x] : x) & x. + Since the second and the third occurrences of x are shared, + the third occurrence of x is skipped if skip_shared is enabled.
@@ -43,7 +75,7 @@ def generator(f: AbsExpr, skip_shared: bool = False): subexpr = f while True: - if subexpr.is_atom_term() or (skip_shared and subexpr in done): + if subexpr.is_atom() or (skip_shared and subexpr in done): if not (skip_shared and subexpr in done): yield (0, subexpr) @@ -57,7 +89,7 @@ def generator(f: AbsExpr, skip_shared: bool = False): return i, subexpr = s.pop() assert not (skip_shared and subexpr in done) - if i == 0 and subexpr.is_binop_term(): + if i == 0 and subexpr.is_binop(): s.append([1, subexpr]) yield (1, subexpr) # right subformula @@ -69,6 +101,14 @@ def generator(f: AbsExpr, skip_shared: bool = False): # left subformula first subexpr = subexpr.get_operand(1) +def compute_size(f: AbsExpr) -> int: + """Computes the total number of operators and atoms.""" + res = 0 + for i, subexpr in generate_subformulas(f): + if i == 0: + res += 1 + continue + return res def to_str(f: AbsExpr) -> str: """Converts formula into string. @@ -78,7 +118,7 @@ def to_str(f: AbsExpr) -> str: """ out = "" - for i, subexpr in generator(f): + for i, subexpr in generate_subformulas(f): if i == 0: out += subexpr.make_str_pre_step() elif i == 1: @@ -109,20 +149,20 @@ def print_formula(f: AbsExpr, stream=None, graph_name="output", fmt="str") -> No raise Exception(f"invalid format {format}") out = "digraph " + f"{graph_name}" + " {\n" - for i, g in generator(f, skip_shared=True): + for i, g in generate_subformulas(f, skip_shared=True): if i == 0: out += "\t" + f"{id(g)} [label = {g.make_node_str_step()}]\n" elif i == 1: continue elif i == 2: - if g.is_binop_term(): + if g.is_binop(): out += f"\t{id(g)}" + " -> " + f"{id(g.get_operand(1))}\n" out += f"\t{id(g)}" + " -> " + f"{id(g.get_operand(2))}\n" - elif g.is_unop_term(): + elif g.is_unop(): out += f"\t{id(g)}" + " -> " + f"{id(g.get_operand(1))}\n" - elif g.is_atom_term(): + elif g.is_atom(): continue - elif isinstance(g, AbsFo) and g.is_qf_term(): + elif isinstance(g, AbsFo) and g.is_qf(): out += f"\t{id(g)}" + " 
-> " + f"{id(g.get_operand(1))}\n" else: raise Exception(f"unexpected term: {g.gen_key()}") @@ -191,7 +231,7 @@ def reduce_formula(f: AbsExpr, st: BaseRelSt = None) -> AbsExpr: nnf = compute_nnf(f) assoc = {} - for i, g in generator(nnf, skip_shared=True): + for i, g in generate_subformulas(nnf, skip_shared=True): if i != 2: continue if id(g) in assoc: @@ -229,7 +269,7 @@ def get_free_vars_and_consts(expr: AbsFo) -> tuple: free_vars = [] # free variables and constaints # Note: Do not make skip_shared True. Otherwise, the method, # when applied to say (? [x] : x) & x, mistakenly returns no free variable. - for i, g in generator(expr): + for i, g in generate_subformulas(expr): if i == 0: g.get_free_vars_and_consts_pre_step(bound_vars, free_vars) continue @@ -266,14 +306,14 @@ def substitute(expr: AbsFo, y: int, x: int) -> AbsFo: # Note: Do not make skip_shared True. Otherwise, the method, # when applied to say (? [x] : x) & x, mistakenly returns (? [x] : x) & x. # The correct result must be (? [x] : x) & y. - for i, g in generator(expr): + for i, g in generate_subformulas(expr): if i == 0: - if g.is_forall_term() or g.is_exists_term(): + if g.is_forall() or g.is_exists(): if g.get_bound_var() == x: nof_bound += 1 continue if i == 2: - if g.is_forall_term() or g.is_exists_term(): + if g.is_forall() or g.is_exists(): if g.get_bound_var() == x: nof_bound -= 1 if id(g) in assoc: @@ -295,7 +335,7 @@ def _eliminate_qf_step( to import op module, which results in circular import. 
""" - if expr.is_neg_term(): + if expr.is_neg(): g = assoc[id(expr.get_operand(1))] assoc[id(expr)] = type(expr).neg(g) return @@ -305,23 +345,23 @@ def _eliminate_qf_step( return if ( - expr.is_land_term() - or expr.is_lor_term() - or expr.is_implies_term() - or expr.is_iff_term() + expr.is_land() + or expr.is_lor() + or expr.is_implies() + or expr.is_iff() ): left = assoc[id(expr.get_operand(1))] right = assoc[id(expr.get_operand(2))] assoc[id(expr)] = type(expr).binop(expr.get_tag(), left, right) return - if expr.is_forall_term() or expr.is_exists_term(): + if expr.is_forall() or expr.is_exists(): bvar = expr.get_bound_var() g = assoc[id(expr.get_operand(1))] li = [substitute(g, d, bvar) for d in const_symb_tup] - if expr.is_forall_term(): + if expr.is_forall(): acc = type(expr).binop_batch(type(expr).get_land_tag(), li) else: acc = type(expr).binop_batch(type(expr).get_lor_tag(), li) @@ -329,7 +369,7 @@ def _eliminate_qf_step( assoc[id(expr)] = acc return - if expr.is_edg_atom() or expr.is_eq_atom(): + if expr.is_edg_atom() or expr.is_eq_atom() or expr.is_lt_atom(): assoc[id(expr)] = expr return @@ -355,7 +395,7 @@ def eliminate_qf(expr: AbsFo, st: BaseRelSt) -> AbsFo: const_symb_tup = st.domain assoc = {} - for i, g in generator(expr): + for i, g in generate_subformulas(expr): if i != 2: continue if id(g) in assoc: @@ -365,6 +405,59 @@ def eliminate_qf(expr: AbsFo, st: BaseRelSt) -> AbsFo: assert id(expr) in assoc return assoc[id(expr)] +def _check_no_missing_variable_in_prop(f: Prop, st: BaseRelSt): + """Checks whether there is no missing variable in prop formula. + + Checks whether there is no propositional variable px[i] such that + for some first-order variable x, px[j] occurs in f but px[i] does not, + where px is the list of propositional variables of x. 
+ """ + prop_var_list = [ + g.get_var_index() + for i, g in generate_subformulas(f, skip_shared=True) + if i == 2 and g.is_var_atom() + ] + fo_var_list = [st.get_variable_position_pair(p)[0]\ + for p in prop_var_list if st.is_decodable_boolean_var(p)] + for x in fo_var_list: + for p in st.get_boolean_var_list(x): + assert p in prop_var_list + +def _check_no_missing_variable_in_assign(assign: list, st: BaseRelSt): + """Checks whether there is no missing variable in assign of internal variables. + + Checks whether there is no cnf variable px[i] such that + for some first-order variable x, px[j] occurs in f but px[i] does not, + where px is the list of propositional variables of x. + """ + prop_var_set = set() + for lit in assign: + var = abs(lit) + if st.is_decodable_boolean_var(var): + prop_var_set.add(var) + fo_var_list = [st.get_variable_position_pair(p)[0] for p in prop_var_set] + for x in fo_var_list: + for p in st.get_boolean_var_list(x): + assert p in prop_var_set + +def _check_no_missing_variable_in_cnf(cnf: list, st: BaseRelSt): + """Checks whether there is no missing variable in cnf of internal variables. + + Checks whether there is no cnf variable px[i] such that + for some first-order variable x, px[j] occurs in f but px[i] does not, + where px is the list of propositional variables of x. + """ + prop_var_set = set() + for clause in cnf: + for lit in clause: + var = abs(lit) + if st.is_decodable_boolean_var(var): + prop_var_set.add(var) + fo_var_list = [st.get_variable_position_pair(p)[0] for p in prop_var_set] + for x in fo_var_list: + for p in st.get_boolean_var_list(x): + assert p in prop_var_set + def perform_boolean_encoding(f: AbsFo, st: BaseRelSt) -> Prop: """Converts a first-order formula into an equiv. propositional formula. 
@@ -386,7 +479,7 @@ def perform_boolean_encoding(f: AbsFo, st: BaseRelSt) -> Prop: qf_free = eliminate_qf(f, st) assoc = {} - for i, g in generator(qf_free, skip_shared=True): + for i, g in generate_subformulas(qf_free, skip_shared=True): if i != 2: continue if id(g) in assoc: @@ -417,10 +510,8 @@ def propnize(f: AbsFo, st: BaseRelSt) -> Prop: return perform_boolean_encoding(f, st) - - # Propositional Logic Methods -def compute_cnf(tup: tuple) -> tuple[int, int, tuple[tuple[int, ...], ...]]: +def compute_cnf(expr_list: Iterable) -> tuple[int, int, tuple[tuple[int, ...], ...]]: """Computes Conjunction Normal Form for the tuple of formulas. Note: @@ -437,33 +528,27 @@ def compute_cnf(tup: tuple) -> tuple[int, int, tuple[tuple[int, ...], ...]]: * the number of auxiliary variables introduced during CNF-encoding, * a tuple of clauses, each clause is a tuple of variable indices. """ - if type(tup) is not tuple: - raise TypeError("Tuple of Props must be given as input argument.") - if tup == (): - raise TypeError("Tuple must be non-empty") - if False in [issubclass(type(f), Prop) for f in tup]: + if False in [issubclass(type(f), Prop) for f in expr_list]: raise TypeError( "Expression must be \ an instance of Prop or its subclass" ) - - expr_list = [reduce_formula(f) for f in tup] - - if type(tup[0]).false_const() in expr_list: + expr_list = [reduce_formula(f) for f in expr_list] + if len(expr_list) == 0: + raise Exception("empty list is given") + if type(expr_list[0]).false_const() in expr_list: return 0, 0, ((),) # UNSAT - # the largest index of variables appearing in formulas base = 0 for f in expr_list: var_list = [ g.get_var_index() - for i, g in generator(f, skip_shared=True) + for i, g in generate_subformulas(f, skip_shared=True) if i == 2 and g.is_var_atom() ] if var_list != []: val = max(var_list) base = val if base < val else base - # next index of aux. 
variable igen = IndexGen(base + 1) cnf = [] @@ -472,15 +557,13 @@ def compute_cnf(tup: tuple) -> tuple[int, int, tuple[tuple[int, ...], ...]]: if f.is_true_atom(): continue assoc = {} - for i, g in generator(f, skip_shared=True): + for i, g in generate_subformulas(f, skip_shared=True): if i != 2: continue if id(g) in assoc: continue g.compute_cnf_step(igen, assoc, cnf) - assert id(f) in assoc cnf.append((assoc[id(f)],)) - naux = igen.get_count() # nof aux. variables return base, naux, tuple(cnf) diff --git a/pygplib/prop.py b/pygplib/prop.py index f0f4053..6e96111 100644 --- a/pygplib/prop.py +++ b/pygplib/prop.py @@ -15,15 +15,20 @@ class Prop(AbsProp): """Expression of Propositional Logic + :: + + Node | self._aux[0] | self._aux[1] + -----------------|----------------|-------------- + True constant | - | - + False constant | - | - + Boolean Variable | variable index | - + Attributes: _VAR: string, representing Boolean variable. _ATOM_TAGS: tuple of strings, representing types of atom. _BINOP_TAGS: tuple of strings, representing types of binary operator. _UNOP_TAGS: tuple of strings, representing types of uniary operator. _EXPR_TAGS: tuple of available tags in this class. - _ATOM_BEGIN_POS: beginning position of the atom part of the aux field. - _ATOM_LEN: length of the atom part of the aux field. - _AUX_LEN: length of the aux field. 
""" # Tag-related Variables and Methods @@ -35,10 +40,6 @@ class Prop(AbsProp): _EXPR_TAGS = _ATOM_TAGS + _BINOP_TAGS + _UNOP_TAGS - _ATOM_BEGIN_POS = AbsProp._AUX_LEN - _ATOM_LEN = 1 - _AUX_LEN = _ATOM_LEN + AbsProp._AUX_LEN - @classmethod def get_var_tag(cls): """Gets tag of var.""" @@ -55,18 +56,19 @@ def var(cls, index: int): # Instance Methods def get_atom_value(self, i: int) -> int: """Gets the index of the Boolean variable.""" + warn_msg = "`get_atom_value()` has been deprecated and will be removed in v3.0.0" + warnings.warn(warn_msg, UserWarning) if not self.is_var_atom(): raise Exception("The formula is not a Boolean variable.") - if not 0 < i <= type(self)._ATOM_LEN: - raise IndexError( - "Value index \ - should range 1 to type(self)._ATOM_LEN" - ) - return self._aux[type(self)._ATOM_BEGIN_POS + i - 1] + if i != 1: + raise IndexError("Value index should be 1") + return self.get_var_index() def get_var_index(self): """Gets the index of the Boolean variable.""" - return self.get_atom_value(1) + if not self.is_var_atom(): + raise Exception("The formula is not a Boolean variable.") + return self._aux[0] def is_var_atom(self): """Is the formula a Boolean variable ?""" @@ -105,8 +107,7 @@ def read(cls, formula_str: str) -> AbsExpr: The name of a variable symbol is defined as follows:: - ::= ( | | "_")* "@" \ - + ::= ( | | "_")* ::= ["a"-"z"] ::= ["0"-"9"] ::= ["1"-"9"](["0"-"9"])* @@ -128,15 +129,15 @@ def read(cls, formula_str: str) -> AbsExpr: that binary operations are left-associative, unary operation is right-associative. """ + if formula_str.strip() == "": + raise Exception("no formula given") # The following tokens are suppressed in the parsed result. COMMA = pp.Suppress(cls.get_comma_tag()) LPAREN = pp.Suppress(cls.get_lparen_tag()) RPAREN = pp.Suppress(cls.get_rparen_tag()) - # Variable symbols are strings that match [a-z][a-z0-9_]*@[0-9]\+ . 
- VAR = pp.Combine(pp.Word(pp.srange("[a-z]"), pp.srange("[a-z0-9_]")) \ - + "@" + pp.Word(pp.srange("[0-9]"), pp.srange("[0-9]"))) + VAR = pp.Word(pp.srange("[a-z_]"), pp.srange("[a-z0-9_]")) TRUE = pp.Literal(cls.get_true_const_tag()) FALSE = pp.Literal(cls.get_false_const_tag()) @@ -157,14 +158,6 @@ def read(cls, formula_str: str) -> AbsExpr: @VAR.set_parse_action def action_var(string, location, tokens): assert len(tokens) == 1, f"{tokens}" - res = tokens[0].split("@") - assert len(res) == 2 and res[1].isdigit() - NameMgr.lookup_index(res[0]) - if int(res[1]) < 1: - raise Exception( - "Positional index of tokens[0] " - + "must be greater than or equal to 1." - ) return cls.var(NameMgr.lookup_index(tokens[0])) @CONST_REL.set_parse_action @@ -217,7 +210,7 @@ def reduce_formula_step(self, assoc: dict, st: BaseRelSt) -> None: def make_str_pre_step(self) -> str: """Makes string in prefix order for this object.""" if self.is_var_atom(): - prop_index = self.get_atom_value(1) + prop_index = self.get_var_index() return f"{NameMgr.lookup_name(prop_index)}" return super().make_str_pre_step() @@ -236,6 +229,6 @@ def make_str_post_step(self) -> str: def make_node_str_step(self) -> str: """Makes string of this object for DOT print.""" if self.is_var_atom(): - prop_index = self.get_atom_value(1) + prop_index = self.get_var_index() return f'"{NameMgr.lookup_name(prop_index)}"' return super().make_node_str_step() diff --git a/pygplib/symrelst.py b/pygplib/symrelst.py index d762323..8421de7 100644 --- a/pygplib/symrelst.py +++ b/pygplib/symrelst.py @@ -11,7 +11,8 @@ class SymRelSt(BaseRelSt): There is no function symbol. """ - def __init__(self, objects: tuple[int], relation: tuple[tuple[int]]): + def __init__(self, objects: tuple[int], relation: tuple[tuple[int]],\ + msg: str = ""): """Initializes an object of SymRelSt class. 
Initializes a structure so that each object in domain of discourse is @@ -56,26 +57,28 @@ def __init__(self, objects: tuple[int], relation: tuple[tuple[int]]): objects: a domain of discourse, a tuple of constant symbol indices relation: a tuple of (non-empty) relation instances, where each instance need not be ordered. + msg: message printed when assertion failed """ if len(objects) != len(set(objects)): - raise Exception(f"duplicate object found: {objects}") + raise Exception(f"duplicate object found: "+msg) for obj in objects: if not NameMgr.has_name(obj): raise ValueError(\ - f"object {obj}, given as symbol index, has no name.") + f"object {obj}, given as symbol index, has no name: "+msg) if not NameMgr.is_constant(obj): raise ValueError(\ - f"{NameMgr.lookup_name(obj)} is not a constant symbol.") + f"{NameMgr.lookup_name(obj)} is not a constant symbol: "+msg) for inst in relation: if len(set(inst)) != len(inst): - raise Exception(f"duplicate objcect found: {inst}") + raise Exception(f"duplicate object found: "+msg) if False in list(map(lambda i: i > 0, inst)): - raise Exception(f"invalid relation instance: {inst}") + raise Exception(f"invalid relation instance: "+msg) super().__init__(\ objects, \ type(self)._compute_dual_hypergraph(objects, relation), \ - len(relation)) + len(relation),\ + msg=msg) @staticmethod def _compute_dual_hypergraph(vertices: tuple[int], \ diff --git a/pyproject.toml b/pyproject.toml index 4f5e4fd..17970f6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "pygplib" -version = "2.2.0" +version = "2.3.0" description = "Python First-Order Graph Property Library" authors = ["Takahisa Toda "] license = "MIT" @@ -31,11 +31,13 @@ classifiers = [ [tool.poetry.dependencies] python = "^3.9" pyparsing = "^3.1.1" +simpgraph = "^1.0.0" pytest = { version ="^7.4.2", optional = true } pytest-cov = { version = "^4.1.0", optional = true } tox = { version = "^4.11.3", optional = true } python-sat = { version = 
"^0.1.8.dev10", optional = true } +pyunigen = { version = "^2.5.7", optional = true } argparse = { version = "^1.4.0", optional = true } Sphinx = { version = "^7.2.6", optional = true } sphinx-removed-in = { version = "^0.2.1", optional = true } @@ -43,7 +45,7 @@ sphinxcontrib-trio = { version = "^1.1.2", optional = true } pallets-sphinx-themes = { version = "^2.1.1", optional= true } [tool.poetry.extras] -test = ["pytest", "pytest-cov", "tox", "python-sat", "argparse"] +test = ["pytest", "pytest-cov", "tox", "python-sat", "argparse", "pyunigen"] docs = ["Sphinx", "sphinx-removed-in", "sphinxcontrib-trio", "pallets-sphinx-themes"] [build-system] diff --git a/tests/formula.py b/tests/formula.py new file mode 100644 index 0000000..9e479cc --- /dev/null +++ b/tests/formula.py @@ -0,0 +1,7 @@ +import random + +from pygplib import Fog, NameMgr, GrSt, Prop +import pygplib.op as op + +def random_fog(n: int, l: int, k: int, seed: int = None) -> Fog: + pass diff --git a/tests/graph.py b/tests/graph.py new file mode 100644 index 0000000..7b3b59a --- /dev/null +++ b/tests/graph.py @@ -0,0 +1,39 @@ +import random +from simpgraph import SimpGraph + +def random_graph(n: int, m: int, max_retries: int = 10, + seed: int = None, + reject_isolated_vertex: bool = False, + reject_isolated_edge: bool = False) -> SimpGraph: + if n < 0 or m < 0 or m > (n*(n-1))//2: + raise ValueError("number of vertices or number of edges is invalid.") + if seed is not None: + random.seed(seed) + pairs = [(i,j)\ + for i in range(1, n+1)\ + for j in range(i+1, n+1)] + for step in range(max_retries): + G = SimpGraph() + for v in range(1,n+1): + G.add_vertex(v) + random.shuffle(pairs) + for v,w in sorted(pairs[:m]): + G.add_edge(v,w) + Graph_Generation_Success = True + if reject_isolated_vertex: + for v in G.all_vertices(): + if len(G.adj_vertices(v)) == 0: + Graph_Generation_Success = False + break + if reject_isolated_edge: + for v,w in G.all_edges(): + if len(G.adj_vertices(v)) == 1\ + and
len(G.adj_vertices(w)) == 1: + assert w in G.adj_vertices(v) + assert v in G.adj_vertices(w) + Graph_Generation_Success = False + break + if not Graph_Generation_Success: + continue + return G + return None diff --git a/examples/__init__.py b/tests/name.py similarity index 100% rename from examples/__init__.py rename to tests/name.py diff --git a/tests/pool.py b/tests/pool.py new file mode 100644 index 0000000..6328d4c --- /dev/null +++ b/tests/pool.py @@ -0,0 +1,83 @@ +import random + +from pygplib import Prop, Fog, NameMgr +import pygplib.op as op + +class AbsExprPool: + def __init__(self): + self.pool = list() + + def add(self, f) -> None: + if f not in self.pool: + self.pool.append(f) + + def choice(self, minsize = 1, maxsize = None): + #return random.choice(self.pool) + li = list(filter(lambda f: maxsize is None or minsize <= op.compute_size(f) <= maxsize, self.pool)) + return random.choice(li) + + def _mutate(self, maxsize = None) -> None: + pass + + def mutate(self, n: int = 1, maxsize = None) -> None: + for i in range(n): + self._mutate(maxsize=maxsize) + +class PropPool(AbsExprPool): + def __init__(self, tags: tuple[str] = Prop._BINOP_TAGS + Prop._UNOP_TAGS): + self.tags = tuple(tags) + super().__init__() + + def _mutate(self, maxsize = None) -> None: + tag = random.choice(self.tags) + if tag in Prop._UNOP_TAGS: + f = self.choice(maxsize=maxsize) + res = Prop.neg(f) + elif tag in Prop._BINOP_TAGS: + f = self.choice(maxsize=maxsize) + g = self.choice(maxsize=maxsize) + res = Prop.binop(tag, f, g) + else: + raise Exception(f"unknown tag: {tag}") + self.add(res) + +class FogPool(AbsExprPool): + def __init__(self, tags: tuple[str] = Fog._QF_TAGS + Fog._BINOP_TAGS + Fog._UNOP_TAGS): + self.tags = tuple(tags) + super().__init__() + + def _mutate(self, maxsize = None) -> None: + tag = random.choice(self.tags) + if tag in Fog._UNOP_TAGS: + f = self.choice(maxsize=maxsize) + res = Fog.neg(f) + elif tag in Fog._BINOP_TAGS: + f = self.choice(maxsize=maxsize) + g = 
self.choice(maxsize=maxsize) + res = Fog.binop(tag, f, g) + elif tag in Fog._QF_TAGS: + f = self.choice(maxsize=maxsize) + tup = type(self).get_vars(f) + if len(tup) > 0: + x = random.choice(tup) + res = Fog.qf(tag, f, x) + else: + res = None + else: + raise Exception(f"unknown tag: {tag}") + if res is not None: + self.add(res) + + @staticmethod + def get_vars(f: Fog): + res = set() + for i, g in op.generate_subformulas(f): + if i == 0: + if Fog.is_edg_atom(g)\ + or Fog.is_eq_atom(g): + for i in [1,2]: + v = g.get_atom_arg(i) + if NameMgr.is_variable(v): + res.add(v) + continue + return tuple(res) diff --git a/tests/solver.py b/tests/solver.py new file mode 100644 index 0000000..65d0705 --- /dev/null +++ b/tests/solver.py @@ -0,0 +1,26 @@ +from pysat.formula import CNF +from pysat.solvers import Solver +from pyunigen import Sampler + +from pygplib import Prop, NameMgr +import pygplib.op as op + + +def assert_satisfiable(f: Prop, msg: str = ""): + assert not f.is_false_atom() + if f.is_true_atom(): + return + base, naux, cnf = op.compute_cnf((f,)) + with Solver(bootstrap_with=CNF(from_clauses=cnf)) as solver: + assert solver.solve(), msg + +def assert_unsatisfiable(f: Prop, msg: str = ""): + assert not f.is_true_atom() + if f.is_false_atom(): + return + base, naux, cnf = op.compute_cnf((f,)) + with Solver(bootstrap_with=CNF(from_clauses=cnf)) as solver: + assert not solver.solve(), msg + +def assert_equivalence(f: Prop, g: Prop, msg: str = ""): + assert_unsatisfiable(Prop.iff(f, Prop.neg(g)), msg=msg) diff --git a/tests/test_blackbox.py b/tests/test_blackbox.py new file mode 100644 index 0000000..81aa731 --- /dev/null +++ b/tests/test_blackbox.py @@ -0,0 +1,59 @@ +from pygplib import Cnf, Prop, NameMgr, op, Fog, GrSt + +from pysat.formula import CNF +from pysat.solvers import Solver + + +def test_solving_fog_in_fixed_graph(): + tests = [ + # independent set + ("~x1=x2 & ~x1=x3 & ~x2=x3 & ~edg(x1,x2) & ~edg(x1,x3) & ~edg(x2,x3)", True), + # independent set + ("~x1=x2 &
~x1=x3 & ~x1=x4 & ~x1=x5 & ~x2=x3 & ~x2=x4 & ~x2=x5 & ~x3=x4 & ~x3=x5 & ~x4=x5 & ~edg(x1,x2) & ~edg(x1,x3) & ~edg(x1,x4) & ~edg(x1,x5) & ~edg(x2,x3) & ~edg(x2,x4) & ~edg(x2,x5) & ~edg(x3,x4) & ~edg(x3,x5) & ~edg(x4,x5)", False), + # clique + ("~x1=x2 & ~x1=x3 & ~x2=x3 & edg(x1,x2) & edg(x1,x3) & edg(x2,x3)", False), + # path + ("~x1=x2 & ~x1=x3 & ~x1=x4 & ~x2=x3 & ~x2=x4 & ~x3=x4 & edg(x1,x2) & edg(x2,x3) & edg(x3,x4)", True), + # cycle + ("~x1=x2 & ~x1=x3 & ~x1=x4 & ~x2=x3 & ~x2=x4 & ~x3=x4 & edg(x1,x2) & edg(x2,x3) & edg(x3,x4) & edg(x4,x1)", True), + # star + ("~x1=x2 & ~x1=x3 & ~x1=x4 & ~x2=x3 & ~x2=x4 & ~x3=x4 & edg(x1,x2) & edg(x1,x3) & edg(x1,x4)", True), + ] + NameMgr.clear() + # + # V1 ------- V3 + # | | + # | | + # V2---V5 | + # | | | + # | | | + # V4---V7 V6 + # + vertex_list = [1,2,3,4,5,6,7] # vertices of G + edge_list = [(1,2),(1,3),(2,4),(2,5),(3,6),(4,7),(5,7)] # edges of G + + for test_str, expected in tests: + f = Fog.read(test_str) + for encoding in ["direct", "log", "vertex", "edge", "clique"]: + st = GrSt(vertex_list, edge_list, encoding=encoding) + #assert False, f"{st._codes}" + li = [] + li.append(op.perform_boolean_encoding(f, st)) + li.append(st.compute_auxiliary_constraint(f)) + li += [st.compute_domain_constraint(v) \ + for v in op.get_free_vars(f)] + g = Prop.binop_batch(Prop.get_land_tag(), li) + op._check_no_missing_variable_in_prop(g, st) + mgr = Cnf(li, st=st, check_cnf=True) + op._check_no_missing_variable_in_cnf(mgr._orig_cnf, st) + with Solver(bootstrap_with=CNF(from_clauses=mgr.cnf)) as solver: + solvable = solver.solve() + if solvable: + ext_assign = solver.get_model() + assert len(ext_assign) == mgr.nvar + int_assign = mgr.decode_assignment(ext_assign) + op._check_no_missing_variable_in_assign(int_assign, st) + fo_assign = st.decode_assignment(int_assign) + ans = [st.object_to_vertex(fo_assign[key]) \ + for key in fo_assign.keys()] + assert solvable == expected, f"encoding={encoding},test_str={test_str}" diff --git 
a/tests/test_bmc.py b/tests/test_bmc.py deleted file mode 100644 index 99aa9f8..0000000 --- a/tests/test_bmc.py +++ /dev/null @@ -1,187 +0,0 @@ -from pysat.formula import CNF -from pysat.solvers import Solver - -from pygplib import Fog, NameMgr, GrSt, Prop -from examples.bmc import Bmc - - -def test_bmc(): - NameMgr.clear() - N = 7 - # - # V1-------V3 - # | | - # V2--V5 | - # | | | - # V4--V7 V6 - # - vertex_list = [1,2,3,4,5,6,7] - edge_list = [(1,2),(1,3),(2,4),(2,5),(3,6),(4,7),(5,7)] - - for encoding in ["edge", "clique", "direct", "log"]: - st = GrSt(vertex_list,edge_list,encoding=encoding) - - state_expr = Fog.read( - " (~ x1=x2 & ~ edg(x1,x2)) " - + "& (~ x1=x3 & ~ edg(x1,x3)) " - + "& (~ x2=x3 & ~ edg(x2,x3)) " - ) - trans_expr = None - - tests = ( - ((3, 4, 5), (2, 7, 6), "TJ", "SAT"), - ((4, 3, 5), (6, 7, 2), "TJ", "SAT"), - ((3, 4, 5), (2, 7, 6), "TS", "UNSAT"), - ((2, 7, 3), (1, 4, 6), "TJ", "SAT"), - ((2, 6, 7), (2, 7, 6), "TS", "UNSAT"), - ) - - for test_ini, test_fin, test_trans, expected in tests: - ini_expr = Fog.read( - f" x1=V{test_ini[0]} " + f"& x2=V{test_ini[1]} " + f"& x3=V{test_ini[2]} " - ) - fin_expr = Fog.read( - f" x1=V{test_fin[0]} " + f"& x2=V{test_fin[1]} " + f"& x3=V{test_fin[2]} " - ) - - bmc = Bmc(state_expr, trans_expr, ini_expr, fin_expr, st, \ - trans_type=test_trans) - - assert bmc._nof_free_vars == len(bmc._next_vars) - - for i in range(bmc._nof_free_vars): - v = bmc._curr_vars[i] - w = bmc._next_vars[i] - assert bmc._varloc[v] == i - assert bmc._varloc[w] == i - - for v in bmc._curr_vars: - i = bmc._varloc[v] - assert bmc._curr_vars[i] == v - - for w in bmc._next_vars: - i = bmc._varloc[w] - assert bmc._next_vars[i] == w - - solved = False - for step in range(10): - bmc.generate_cnf(step) - - cnf = CNF(from_clauses=bmc.cnf) - with Solver(bootstrap_with=cnf) as solver: - for m in solver.enum_models(): - m = solver.get_model() - ans = [] - for assign in bmc.decode(m): - res = st.decode_assignment(assign) - 
ans.append([st.object_to_vertex(res[key]) \ - for key in sorted(res.keys())]) - validate_ans(ans, N, edge_list, test_ini, test_fin, test_trans) - solved = True - assert expected == "SAT" - if solved: - break - - assert solved or expected == "UNSAT" - -def test_bmc_partitioning_order(): - NameMgr.clear() - N = 7 - # - # V1-------V3 - # | | - # V2--V5 | - # | | | - # V4--V7 V6 - # - vertex_list = [1,2,3,4,5,6,7] - edge_list = [(1,2),(1,3),(2,4),(2,5),(3,6),(4,7),(5,7)] - - for encoding in ["edge", "clique", "direct", "log"]: - st = GrSt(vertex_list,edge_list,encoding=encoding) - Fog.partitioning_order = True - - state_expr = Fog.read( - " (~ x1=x2 & ~ edg(x1,x2)) " - + "& (~ x1=x3 & ~ edg(x1,x3)) " - + "& (~ x2=x3 & ~ edg(x2,x3)) " - ) - trans_expr = None - - tests = ( - ((3, 4, 5), (2, 7, 6), "TJ", "SAT"), - ((4, 3, 5), (6, 7, 2), "TJ", "SAT"), - ((3, 4, 5), (2, 7, 6), "TS", "UNSAT"), - ((2, 7, 3), (1, 4, 6), "TJ", "SAT"), - ((2, 6, 7), (2, 7, 6), "TS", "UNSAT"), - ) - - for test_ini, test_fin, test_trans, expected in tests: - ini_expr = Fog.read( - f" x1=V{test_ini[0]} " + f"& x2=V{test_ini[1]} " + f"& x3=V{test_ini[2]} " - ) - fin_expr = Fog.read( - f" x1=V{test_fin[0]} " + f"& x2=V{test_fin[1]} " + f"& x3=V{test_fin[2]} " - ) - - bmc = Bmc(state_expr, trans_expr, ini_expr, fin_expr, st, \ - trans_type=test_trans) - - assert bmc._nof_free_vars == len(bmc._next_vars) - - for i in range(bmc._nof_free_vars): - v = bmc._curr_vars[i] - w = bmc._next_vars[i] - assert bmc._varloc[v] == i - assert bmc._varloc[w] == i - - for v in bmc._curr_vars: - i = bmc._varloc[v] - assert bmc._curr_vars[i] == v - - for w in bmc._next_vars: - i = bmc._varloc[w] - assert bmc._next_vars[i] == w - - solved = False - for step in range(10): - bmc.generate_cnf(step) - - cnf = CNF(from_clauses=bmc.cnf) - with Solver(bootstrap_with=cnf) as solver: - for m in solver.enum_models(): - m = solver.get_model() - ans = [] - for assign in bmc.decode(m): - res = st.decode_assignment(assign) - 
ans.append([st.object_to_vertex(res[key]) \ - for key in sorted(res.keys())]) - validate_ans(ans, N, edge_list, test_ini, test_fin, test_trans) - solved = True - assert expected == "SAT" - if solved: - break - - assert solved or expected == "UNSAT" - Fog.partitioning_order = False - -def validate_ans(ans, n, edge_list, ini, fin, trans_type): - assert sorted(ans[0]) == sorted(ini) - assert sorted(ans[-1]) == sorted(fin) - - prev = () - for curr in ans: - for v in curr: - for w in curr: - assert tuple(sorted([abs(v),abs(w)])) not in edge_list, \ - f"{abs(v)}-{abs(w)}, curr={CURR}" - - if prev != (): - v = tuple(set(curr) - set(prev)) - w = tuple(set(prev) - set(curr)) - assert len(v) == 1 - assert len(w) == 1 - if trans_type == "TS": - assert E[abs(v[0])][abs(w[0])] == 1 - - prev = curr diff --git a/tests/test_cnf.py b/tests/test_cnf.py deleted file mode 100644 index 2093148..0000000 --- a/tests/test_cnf.py +++ /dev/null @@ -1,48 +0,0 @@ -from pygplib import Cnf, Prop, NameMgr, op - - -def test_cnf_mgr(): - tests = [ - "T", - "F", - "x@1", - "~ x@1", - "x@1 & x@2", - "x@1 | x@2", - "x@1 -> x@2", - "x@1 <-> x@2", - "x@1 <-> x@2 & x@3", - "T | ~ x@1 <-> x@2 & x@3", - ] - NameMgr.clear() - - for test_str in tests: - res = Prop.read(test_str) - base, naux, cnf1 = op.compute_cnf((res,)) - - if cnf1 == (): - cnf_set1 = set() - elif cnf1 == ((),): - cnf_set1 = {""} - else: - cnf_set1 = {",".join(map(str, sorted(cls))) for cls in cnf1} - - mgr = Cnf((res,)) - assert mgr.get_ncls() == len(cnf1) - - cnf_set2 = set() - for pos in range(mgr.get_ncls()): - cls = map(mgr.decode_lit, mgr.get_clause(pos)) - cls_str = ",".join(map(str, sorted(cls))) - cnf_set2.add(cls_str) - - assert cnf_set1 == cnf_set2 - - max_var = 0 - for pos in range(mgr.get_ncls()): - if len(mgr.get_clause(pos)) == 0: - continue - val = max(mgr.get_clause(pos)) - max_var = val if max_var < val else max_var - - assert mgr.get_nvar() == max_var diff --git a/tests/test_constraints.py b/tests/test_constraints.py 
new file mode 100644 index 0000000..d1d8bd7 --- /dev/null +++ b/tests/test_constraints.py @@ -0,0 +1,38 @@ +import random +from typing import Iterable + +from pygplib import Prop, NameMgr +import pygplib.op as op +import pygplib.constraints as const + +from . import solver +from . import tools + +def test_at_most_r(): + NameMgr.clear() + max_n = 100 + x = [NameMgr.lookup_index(f"x{i}") for i in range(1,max_n+1)] + for step in range(3): + random.shuffle(x) + n = 10 + r = random.randint(0,n) + _test_at_most_r(x[:n], r) + +def _test_at_most_r(x: Iterable[int], r: int): + f = Prop.read(const.at_most_r(x, r)) + code_length = len(x) + for n in range(2**code_length): + count = 0 + code = tools.binary_code(n,code_length) + msg = f"r={r},code="+",".join(map(str,code)) + g = f + for i, val in enumerate(code): + if val == 1: + g = Prop.land(g,Prop.var(x[i])) + count += 1 + else: + g = Prop.land(g,Prop.neg(Prop.var(x[i]))) + if count <= r: + solver.assert_satisfiable(g,msg=msg) + else: + solver.assert_unsatisfiable(g,msg=msg) diff --git a/tests/test_ecc.py b/tests/test_ecc.py index 415f2d1..6da311a 100644 --- a/tests/test_ecc.py +++ b/tests/test_ecc.py @@ -1,269 +1,55 @@ -from pygplib import Ecc - -def test_init(): - tests = [ - ( - # V1---V2 - # | / - # | / - # | / - # V3---V5 - # - # vertex_list - [2,3,1,5], - # edge_list - [ - (2,1), - (3,1), - (2,3), - (3,5), - ], - # _invdic - { - (1,2) : 0, - (1,3) : 1, - (2,3) : 2, - (3,5) : 3, - 2 : 0, - 3 : 1, - 1 : 2, - 5 : 3, - }, - # N - { - 1: {2,3}, - 2: {1,3}, - 3: {1,2,5}, - 5: {3}, - } - ), - ] - - for vertex_list, edge_list, _invdic, N in tests: - obj = Ecc(vertex_list, edge_list) - for key in obj._invdic.keys(): - val = obj._invdic[key] - assert key in _invdic - assert _invdic[key] == val - for key in _invdic.keys(): - val = _invdic[key] - assert key in obj._invdic - assert obj._invdic[key] == val - for key in obj._N.keys(): - val = obj._N[key] - assert key in N - assert N[key] == val - for key in N.keys(): - val = N[key] - 
assert key in obj._N - assert obj._N[key] == val - -def test__select_uncovered_edge(): - # V1---V2 - # | / - # | / - # | / - # V3---V5 - # - vertex_list = [2,3,1,5] - edge_list = \ - [ - (2,1), - (3,1), - (2,3), - (3,5), - ] - tests = ( - ( - # U: - [(1,2),(1,3),(2,3)], - # variant - "r", - ), - ) - - obj = Ecc(vertex_list, edge_list) - for U, variant in tests: - if variant == "r": - for i in range(10): - u,v = obj._select_uncovered_edge(U,variant=variant) - assert u in obj._verts - assert v in obj._verts - e = tuple(sorted([u,v])) - assert e in U - -def test__find_clique_covering(): - # V1---V2 - # | / - # | / - # | / - # V3---V5 - # - vertex_list = [2,3,1,5] - edge_list = \ - [ - (2,1), - (3,1), - (2,3), - (3,5), - ] - tests = ( - ( - # u: - 1, - # v: - 2, - # U: - [(1,2),(1,3),(2,3)], - # variant - "r", - # Q - (1,2,3), - ), - ( - # u: - 3, - # v: - 5, - # U: - [(1,2), (1,3), (2,3), (3,5)], - # variant - "r", - # Q - (3,5), - ), - ) - - obj = Ecc(vertex_list, edge_list) - for u, v, U, variant, expected in tests: - if variant == "r": - for i in range(10): - Q = obj._find_clique_covering(u,v,U,variant=variant) - assert Q == expected - -def test__mark_all_edges_covered(): - # V1---V2 - # | / - # | / - # | / - # V3---V5 - # - vertex_list = [2,3,1,5] - edge_list = \ - [ - (2,1), - (3,1), - (2,3), - (3,5), - ] - tests = ( - ( - # Q: - (1,2,3), - # U: - [(1,2),(1,3),(2,3)], - # pi: - [0,1,2,3], - # expected - { - "U" : [], - } - ), - ) +import random - obj = Ecc(vertex_list, edge_list) - for Q,U,pi,expected in tests: - obj._mark_all_edges_covered(Q,U,pi) - assert U == expected["U"] - assert set(pi) == set([i for i in range(len(obj._edges))]) +from pygplib import Ecc +from simpgraph import SimpGraph +from . 
import graph def test_compute_ecc(): - tests = ( - ( - # V1---V2 - # | /| - # | / | - # | / | - # V3---V5 - # - # vertex_list - [2,3,1,5], - # edge_list - [ - (2,1), - (3,1), - (2,3), - (3,5), - (2,5), - ], - # variant - "rr", - ), - ) - - for vertex_list, edge_list, variant in tests: - obj = Ecc(vertex_list, edge_list) + variant = "rr" + for step in range(100): + n = 20 # number of vertices + m = random.randint(0,(n*(n-1))//2) # number of edges + G = graph.random_graph(n,m, + reject_isolated_vertex=True, + reject_isolated_edge=True) + if G is None: + continue + obj = Ecc(list(G.all_vertices()), list(G.all_edges())) res = obj.compute_ecc(variant=variant) - # clique? for Q in res: + assert len(Q) > 0 for u in Q: for v in Q: - if u != v: - e = tuple(sorted([u,v])) - assert e in obj._edges - + assert u ==v or u in G.adj_vertices(v) # edge cover? - for edge in obj._edges: + for v,w in G.all_edges(): found = False for Q in res: - if edge[0] in Q and edge[1] in Q: + if v in Q and w in Q: found = True break assert found def test_compute_separating_ecc(): - tests = ( - ( - # V1---V2 - # | /| - # | / | - # | / | - # V3---V5 - # - # vertex_list - [2,3,1,5], - # edge_list - [ - (2,1), - (3,1), - (2,3), - (3,5), - (2,5), - ], - # variant - "rr", - ), - ) - - for vertex_list, edge_list, variant in tests: - obj = Ecc(vertex_list, edge_list) + variant = "rr" + for step in range(100): + n = 20 # number of vertices + m = random.randint(0,(n*(n-1))//2) # number of edges + G = graph.random_graph(n,m, + reject_isolated_vertex=True, + reject_isolated_edge=True) + if G is None: + continue + obj = Ecc(list(G.all_vertices()), list(G.all_edges())) res = obj.compute_separating_ecc(variant=variant) - - # clique? - for Q in res: - for u in Q: - for v in Q: - if u != v: - e = tuple(sorted([u,v])) - assert e in obj._edges - - # edge cover? - for edge in obj._edges: - found = False - for Q in res: - if edge[0] in Q and edge[1] in Q: - found = True - break - assert found - + # separating? 
+ for u in G.all_vertices(): + for v in G.all_vertices(): + if u == v: + continue + found = False + for Q in res: + if u in Q and v not in Q or v in Q and u not in Q: + found = True + assert found diff --git a/tests/test_fog.py b/tests/test_fog.py index 5e4877c..3261029 100644 --- a/tests/test_fog.py +++ b/tests/test_fog.py @@ -252,7 +252,7 @@ def test_format(): assert test_form == res_form -def test_generator(): +def test_generate_subformulas(): tests = [ ( "T", @@ -336,7 +336,7 @@ def test_generator(): pos0 = 0 pos1 = 0 pos2 = 0 - for i, g in op.generator(res): + for i, g in op.generate_subformulas(res): if i == 0: assert pos0 < len(prefix_expected) assert op.to_str(g) == prefix_expected[pos0] @@ -421,7 +421,7 @@ def test_generator(): pos0 = 0 pos1 = 0 pos2 = 0 - for i, g in op.generator(res, skip_shared=True): + for i, g in op.generate_subformulas(res, skip_shared=True): if i == 0: assert pos0 < len(prefix_expected) assert op.to_str(g) == prefix_expected[pos0] @@ -729,85 +729,3 @@ def test_substitute(): res = op.substitute(form, op1, op2) res_str = op.to_str(res) assert res_str == expected, f"{res_str}, {expected}" - - -def test_perform_boolean_encoding(): - tests = [ - ("T", "T"), - ("F", "F"), - ("x = x", "T"), - ("edg(x, x)", "F"), - ("x = y", "((x@1 <-> y@1) & (x@2 <-> y@2))"), - ( - "edg(x, y)", - "(((x@1 & y@1) | (x@2 & y@2)) & (~ ((x@1 <-> y@1) & (x@2 <-> y@2))))", - ), - ( - "x = y | edg(x, y)", - "(((x@1 <-> y@1) & (x@2 <-> y@2)) | (((x@1 & y@1) | (x@2 & y@2)) & (~ ((x@1 <-> y@1) & (x@2 <-> y@2)))))", - ), - ("! [x] : x = y", "((((T <-> y@1) & (F <-> y@2)) & ((T <-> y@1) & (T <-> y@2))) & ((F <-> y@1) & (T <-> y@2)))"), - ("? 
[x] : x = y", "((((T <-> y@1) & (F <-> y@2)) | ((T <-> y@1) & (T <-> y@2))) | ((F <-> y@1) & (T <-> y@2)))"), - ] - - NameMgr.clear() - # | 1 2 - #-------- - # 1| 1 0 - # 2| 1 1 - # 3| 0 1 - vertex_list = [1,2,3] - edge_list= [(1,2), (2,3)] - st = GrSt(vertex_list, edge_list) - - for test_str, expected in tests: - res = Fog.read(test_str) - res = op.perform_boolean_encoding(res, st) - res_str = op.to_str(res) - assert res_str == expected, f"{res_str}, {expected}" - -def test_perform_boolean_encoding_partitioning_order(): - tests = [ - ("T", "T"), - ("F", "F"), - ("x = x", "T"), - ("edg(x, x)", "F"), - ("x = y", "((x@1 <-> y@1) & (x@2 <-> y@2))"), - ( - "edg(x, y)", - "(((x@1 & y@1) | (x@2 & y@2)) & (~ ((x@1 <-> y@1) & (x@2 <-> y@2))))", - ), - ( - "x = y | edg(x, y)", - "(((x@1 <-> y@1) & (x@2 <-> y@2)) | (((x@1 & y@1) | (x@2 & y@2)) & (~ ((x@1 <-> y@1) & (x@2 <-> y@2)))))", - ), - ( - "! [x] : x = y", - "(((T <-> y@1) & (F <-> y@2)) & (((T <-> y@1) & (T <-> y@2)) & ((F <-> y@1) & (T <-> y@2))))", - ), - ( - "? 
[x] : x = y", - "(((T <-> y@1) & (F <-> y@2)) | (((T <-> y@1) & (T <-> y@2)) | ((F <-> y@1) & (T <-> y@2))))", - ), - ] - - NameMgr.clear() - Fog.partitioning_order = True - Prop.partitioning_order = True - # | 1 2 - #-------- - # 1| 1 0 - # 2| 1 1 - # 3| 0 1 - vertex_list = [1,2,3] - edge_list= [(1,2), (2,3)] - st = GrSt(vertex_list, edge_list) - - for test_str, expected in tests: - res = Fog.read(test_str) - res = op.perform_boolean_encoding(res, st) - res_str = op.to_str(res) - assert res_str == expected, f"{res_str}, {expected}" - - Fog.partitioning_order = False - Prop.partitioning_order = False diff --git a/tests/test_fog_excp.py b/tests/test_fog_excp.py index 0141a57..4a19d37 100644 --- a/tests/test_fog_excp.py +++ b/tests/test_fog_excp.py @@ -29,7 +29,7 @@ def test_format(): pass -def test_generator(): +def test_generate_subformulas(): pass diff --git a/tests/test_grst.py b/tests/test_grst.py index 33bd313..d503504 100644 --- a/tests/test_grst.py +++ b/tests/test_grst.py @@ -1,438 +1,500 @@ +import random + from pygplib import GrSt, NameMgr, Fog, Prop from pygplib import op +from . import solver +from . import graph +from . import tools + +def test_vertex_to_object(): + NameMgr.clear() + for step in range(3): + n = 5 # number of vertices + m = random.randint(0,(n*(n-1))//2) # number of edges + G = graph.random_graph(n,m, + reject_isolated_vertex=True, + reject_isolated_edge=True) + if G is None: + continue + vertex_list = list(G.all_vertices()) + edge_list = list(G.all_edges()) + random.shuffle(vertex_list) + random.shuffle(edge_list) + for encoding in ["direct", "log", "vertex", "edge", "clique"]: + st = GrSt(vertex_list, edge_list, encoding=encoding) + for v in vertex_list: + w = st.vertex_to_object(v) + assert v == st.object_to_vertex(w) + for w in st.domain: + v = st.object_to_vertex(w) + assert w == st.vertex_to_object(v) -def test_init(): - # tests for exceptional graphs +def test_domain_direct_enc(): tests = [ - ( - # vertex_list - [], # empty graph! 
- # edge_list - [], - # encoding - "edge", # in edge-encoding - ), - ( - # vertex_list - [], # empty graph! - # edge_list - [], - # encoding - "clique", # in clique-encoding - ), - ( - # vertex_list - [], # empty graph! - # edge_list - [], - # encoding - "direct", # in direct-encoding - ), - ( - # vertex_list - [], # empty graph! - # edge_list - [], - # encoding - "log", # in log-encoding - ), - ( - # V1---V2 - # | - # V3 V4 - # - # vertex_list - [1,2,3,4], - # edge_list - [ - (1,2), - (1,3), # one isolated vertex - ], - # encoding - "edge", # in edge-encoding - ), - ( - # V1---V2 - # | - # V3 V4 - # - # vertex_list - [1,2,3,4], - # edge_list - [ - (1,2), - (1,3), # one isolated vertex - ], - # encoding - "clique", # in clique-encoding - ), - ( - # V1---V2 - # - # vertex_list - [1,2], - # edge_list - [(1,2)], # isolated edge - # encoding - "direct", # in direct-encoding - ), - ( - # V1---V2 - # - # vertex_list - [1,2], - # edge_list - [(1,2)], # isolated edge - # encoding - "log", # in log-encoding - ), - ( - # V1 V2 - # - # vertex_list - [1,2], - # edge_list - [], # multiple isolated vertices - # encoding - "direct", # in direct-encoding - ), - ( - # V1 V2 - # - # vertex_list - [1,2], - # edge_list - [], # multiple isolated vertices - # encoding - "log", # in log-encoding - ), + ( + # 1--2 + # | / + # |/ + # 3--4 + # + # 1 2 3 4 + # -------- + # 1| 1 0 0 0 + # 2| 0 1 0 0 + # 3| 0 0 1 0 + # 4| 0 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # expected + ((1,),(2,),(3,),(4,)) + ), + ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # 1 2 3 4 + # -------- + # 1| 1 0 0 0 + # 2| 0 1 0 0 + # 3| 0 0 1 0 + # 4| 0 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,3),(2,3),(2,4)], + # expected + ((1,),(2,),(3,),(4,)) + ), ] + NameMgr.clear() + + for vertex_list, edge_list, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="direct") + assert st._codes == st.vertex_to_object(expected) - for vertex_list, edge_list, encoding 
in tests: - NameMgr.clear() - st = GrSt(vertex_list,edge_list,encoding=encoding) - check_grst_object(st, vertex_list, edge_list, encoding) - - # tests over encoding and prefix - # V1---V2 - # | / - # | / - # | / - # V3---V5 - # - # vertex_list - vertex_list = [2,3,1,5] - # edge_list - edge_list = [ - (2,1), - (3,1), - (2,3), - (3,5), +def test_domain_log_enc(): + tests = [ + ( + # 1--2 + # | / + # |/ + # 3--4 + # + # 1 2 + # ---- + # 1| 0 0 + # 2| 1 0 + # 3| 0 1 + # 4| 1 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # expected + ((),(1,),(2,),(1,2)) + ), + ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # 1 2 + # ---- + # 1| 0 0 + # 2| 1 0 + # 3| 0 1 + # 4| 1 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,3),(2,3),(2,4)], + # expected + ((),(1,),(2,),(1,2)) + ), ] - for encoding in ["edge", "clique", "direct", "log"]: - for prefix in ["V", "WW", "V12"]: - NameMgr.clear() - st = GrSt(vertex_list,edge_list,encoding=encoding) - check_grst_object(st, vertex_list, edge_list, encoding) - -def check_grst_object(st: GrSt, \ - vertex_list: list, edge_list: list, encoding: str): - assert st._encoding == encoding - assert set(vertex_list) == set(st._verts) - for edge in edge_list: - assert tuple(sorted(edge)) in st._edges - for edge in st._edges: - assert edge in edge_list or tuple([edge[1],edge[0]]) in edge_list + NameMgr.clear() - for v in vertex_list: - assert st.object_to_vertex(st.vertex_to_object(v)) == v - for obj in st.domain: - assert st.vertex_to_object(st.object_to_vertex(obj)) == obj - - for u in st._verts: - for v in st._verts: - if tuple(sorted([u,v])) in st._edges: - assert st.adjacent(\ - st.vertex_to_object(u), \ - st.vertex_to_object(v)) - else: - assert not st.adjacent(\ - st.vertex_to_object(u), \ - st.vertex_to_object(v)) - if u == v: - assert st.equal(\ - st.vertex_to_object(u), \ - st.vertex_to_object(v)) - else: - assert not st.equal(\ - st.vertex_to_object(u), \ - st.vertex_to_object(v)) + for vertex_list, 
edge_list, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="log") + assert st._codes == st.vertex_to_object(expected) -def test_interpretation(): +def test_domain_vertex_enc(): tests = [ - ( - # | 1 2 - #-------- - # 1| 1 0 - # 2| 1 1 - # 3| 0 1 - # - # formula - "x=y", - # vertex list - [1,2,3], - # edge list - [(1,2), (2,3)], - # encoding - "edge", - # first symbol - "x", - # second symbol - "V1", - # expected adjacency between first and second symbols - "(((x@1 & T) | (x@2 & F)) & (~ ((x@1 <-> T) & (x@2 <-> F))))", - # expected equality between first and second symbols - "((x@1 <-> T) & (x@2 <-> F))", - # expected domain constraint of first symbol - "(((x@1 & (~ x@2)) | (x@1 & x@2)) | ((~ x@1) & x@2))", - ), - ( - # | 1 2 - #-------- - # 1| 1 0 - # 2| 1 1 - # 3| 0 1 - # - # formula - "x=y", - # vertex list - [1,2,3], - # edge list - [(1,2), (2,3)], - # encoding - "edge", - # first symbol - "x", - # second symbol - "y", - # expected adjacency between first and second symbols - "(((x@1 & y@1) | (x@2 & y@2)) & (~ ((x@1 <-> y@1) & (x@2 <-> y@2))))", - # expected equality between first and second symbols - "((x@1 <-> y@1) & (x@2 <-> y@2))", - # expected domain constraint of first symbol - "(((x@1 & (~ x@2)) | (x@1 & x@2)) | ((~ x@1) & x@2))", - ), - ( - # | 1 2 3 - #--------- - # 1| 1 - # 2| 1 - # 3| 1 - # - # formula - "x=y", - # vertex list - [1,2,3], - # edge list - [(1,2), (2,3)], - # encoding - "direct", - # first symbol - "x", - # second symbol - "V1", - # expected adjacency between first and second symbols - "(((x@1 & F) | (x@2 & T)) | ((x@2 & F) | (x@3 & F)))", # equiv. 
x@2 & T, meaning x = V2 - # expected equality between first and second symbols - "(((x@1 <-> T) & (x@2 <-> F)) & (x@3 <-> F))", - # expected domain constraint of first symbol - "((((~ (x@1 & x@2)) & (~ (x@1 & x@3))) & (~ (x@2 & x@3))) & ((x@1 | x@2) | x@3))", - ), - ( - # | 1 2 - #------- - # 1| - # 2| 1 - # 3| 0 1 - # - # formula - "x=y", - # vertex list - [1,2,3], - # edge list - [(1,2), (2,3)], - # encoding - "log", - # first symbol - "x", - # second symbol - "V1", - # expected adjacency between first and second symbols - "((((((x@1 <-> F) & (x@2 <-> F)) & (F <-> T)) & (F <-> F)) | ((((x@1 <-> T) & (x@2 <-> F)) & (F <-> F)) & (F <-> F))) | (((((x@1 <-> T) & (x@2 <-> F)) & (F <-> F)) & (F <-> T)) | ((((x@1 <-> F) & (x@2 <-> T)) & (F <-> T)) & (F <-> F))))", # equiv. x@1 <-> T & x@2 <-> F, meaning x = V2 - # expected equality between first and second symbols - "((x@1 <-> F) & (x@2 <-> F))", - # expected domain constraint of first symbol - "((((~ F) & (x@1 <-> F)) & (x@2 <-> T)) | (~ x@2))", - ), - ( - # | 1 2 3 - #--------- - # 1| 1 - # 2| 1 - # 3| 1 - # - # formula - "x=y", - # vertex list - [1,2,3], - # edge list - [(1,2), (2,3)], - # encoding - "direct", - # first symbol - "x", - # second symbol - "y", - # expected adjacency between first and second symbols - "(((x@1 & y@2) | (x@2 & y@1)) | ((x@2 & y@3) | (x@3 & y@2)))", - # expected equality between first and second symbols - "(((x@1 <-> y@1) & (x@2 <-> y@2)) & (x@3 <-> y@3))", - # expected domain constraint of first symbol - "((((~ (x@1 & x@2)) & (~ (x@1 & x@3))) & (~ (x@2 & x@3))) & ((x@1 | x@2) | x@3))", - ), - ( - # | 1 2 - #------- - # 1| - # 2| 1 - # 3| 1 - # - # formula - "x=y", - # vertex list - [1,2,3], - # edge list - [(1,2), (2,3)], - # encoding - "log", - # first symbol - "x", - # second symbol - "y", - # expected adjacency between first and second symbols - "((((((x@1 <-> F) & (x@2 <-> F)) & (y@1 <-> T)) & (y@2 <-> F)) | ((((x@1 <-> T) & (x@2 <-> F)) & (y@1 <-> F)) & (y@2 <-> F))) | (((((x@1 <-> 
T) & (x@2 <-> F)) & (y@1 <-> F)) & (y@2 <-> T)) | ((((x@1 <-> F) & (x@2 <-> T)) & (y@1 <-> T)) & (y@2 <-> F))))", - # expected equality between first and second symbols - "((x@1 <-> y@1) & (x@2 <-> y@2))", - # expected domain constraint of first symbol - "((((~ F) & (x@1 <-> F)) & (x@2 <-> T)) | (~ x@2))", - ), - ( - # | 1 2 3 - #--------- - # 1| 1 0 1 - # 2| 1 1 0 - # 3| 0 0 1 - # 4| 0 1 0 - # - # formula - "x=y", - # vertex list - [1,2,3,4], - # edge list - [(1,2), (2,4),(1,3)], - # encoding - "edge", - # first symbol - "x", - # second symbol - "y", - # expected adjacency between first and second symbols - "((((x@1 & y@1) | (x@2 & y@2)) | (x@3 & y@3)) & (~ (((x@1 <-> y@1) & (x@2 <-> y@2)) & (x@3 <-> y@3))))", - # expected equality between first and second symbols - "(((x@1 <-> y@1) & (x@2 <-> y@2)) & (x@3 <-> y@3))", - # expected domain constraint of first symbol - "(((((x@1 & (~ x@2)) & x@3) | ((x@1 & x@2) & (~ x@3))) | (((~ x@1) & (~ x@2)) & x@3)) | (((~ x@1) & x@2) & (~ x@3)))", - ), - ( - # | 1 2 3 - #--------- - # 1| 1 0 1 - # 2| 1 1 0 - # 3| 0 0 1 - # 4| 0 1 0 - # - # formula - "x=y", - # vertex list - [1,2,3,4], - # edge list - [(1,2), (2,4),(1,3)], - # encoding - "direct", - # first symbol - "x", - # second symbol - "y", - # expected adjacency between first and second symbols - "((((x@1 & y@2) | (x@2 & y@1)) | ((x@2 & y@4) | (x@4 & y@2))) | ((x@1 & y@3) | (x@3 & y@1)))", - # expected equality between first and second symbols - "((((x@1 <-> y@1) & (x@2 <-> y@2)) & (x@3 <-> y@3)) & (x@4 <-> y@4))", - # expected domain constraint of first symbol - "(((((((~ (x@1 & x@2)) & (~ (x@1 & x@3))) & (~ (x@1 & x@4))) & (~ (x@2 & x@3))) & (~ (x@2 & x@4))) & (~ (x@3 & x@4))) & (((x@1 | x@2) | x@3) | x@4))", - ), + ( + # 1--2 + # | / + # |/ + # 3--4 + # + # 1 2 3 4 + # -------- + # 1| 1 1 1 0 + # 2| 0 1 1 0 + # 3| 0 0 1 1 + # 4| 0 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # expected + ((1,2,3),(2,3),(3,4),(4,)) + ), + ( + # 1 2 + 
# | /| + # |/ | + # 3 4 + # + # 1 2 3 4 + # -------- + # 1| 1 0 1 0 + # 2| 0 1 1 1 + # 3| 0 0 1 0 + # 4| 0 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,3),(2,3),(2,4)], + # expected + ((1,3),(2,3,4),(3,),(4,)) + ), ] + NameMgr.clear() + for vertex_list, edge_list, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="vertex") + assert st._codes == st.vertex_to_object(expected) + +def test_domain_edge_enc(): + tests = [ + ( + # 1--2 + # | / + # |/ + # 3--4 + # + # 1 2 3 4 + # -------- + # 1| 1 1 0 0 + # 2| 1 0 1 0 + # 3| 0 1 1 1 + # 4| 0 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # expected + ((1,2),(1,3),(2,3,4),(4,)) + ), + ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # 1 2 3 + # ------ + # 1| 1 0 0 + # 2| 0 1 1 + # 3| 1 1 0 + # 4| 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,3),(2,3),(2,4)], + # expected + ((1,),(2,3),(1,2),(3,)) + ), + ] NameMgr.clear() - for test_str, vertex_list, edge_list, encoding, first_var, second_var,\ - expected_edg, expected_eq, expected_domain in tests: - f = Fog.read(test_str) - st = GrSt(vertex_list, edge_list, encoding=encoding) - - g = st.encode_edg(\ - NameMgr.lookup_index(first_var),\ - NameMgr.lookup_index(second_var)\ - ) - res = op.to_str(g) - assert res == expected_edg, f"{res}, {expected_edg}" - - g = st.encode_eq(\ - NameMgr.lookup_index(first_var),\ - NameMgr.lookup_index(second_var)\ - ) - res = op.to_str(g) - assert res == expected_eq, f"{res}, {expected_eq}" - - g = st.compute_domain_constraint(NameMgr.lookup_index(first_var)) - res = op.to_str(g) - assert res == expected_domain, f"{res}, {expected_domain}" - -def test__compute_log_relation(): + for vertex_list, edge_list, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="edge") + assert st._codes == st.vertex_to_object(expected) + +def test_domain_clique_enc(): tests = [ ( - # 1|00 - # 2|10 - # 3|01 - # 4|11 + # 1--2 + # | / + # |/ + # 3--4 + # + # 1 2 3 + # ------ + # 1| 1 1 0 
+ # 2| 1 0 0 + # 3| 1 1 1 + # 4| 0 0 1 # # vertex_list [1,2,3,4], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # ecc + ((1,2,3),(1,3),(3,4)), # expected - ((2,4),(3,4)) + ((1,2),(1,),(1,2,3),(3,)) ), ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # 1 2 3 + # ------ + # 1| 1 0 0 + # 2| 0 1 1 + # 3| 1 1 0 + # 4| 0 0 1 + # # vertex_list - [], + [1,2,3,4], + # edge_list + [(1,3),(2,3),(2,4)], + # ecc + ((1,3),(2,3),(2,4)), # expected - (), + ((1,),(2,3),(1,2),(3,)) ), + ] + NameMgr.clear() + + for vertex_list, edge_list, ecc, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="clique", ecc=ecc) + assert st._codes == st.vertex_to_object(expected) + +def test__compute_relation_direct_enc(): + tests = [ ( - # 1|0 + # 1--2 + # | / + # |/ + # 3--4 + # + # 1 2 3 4 + # -------- + # 1| 1 0 0 0 + # 2| 0 1 0 0 + # 3| 0 0 1 0 + # 4| 0 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # expected + ((1,),(2,),(3,),(4,)) + ), + ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # 1 2 3 4 + # -------- + # 1| 1 0 0 0 + # 2| 0 1 0 0 + # 3| 0 0 1 0 + # 4| 0 0 0 1 + # # vertex_list - [1], + [1,2,3,4], + # edge_list + [(1,3),(2,3),(2,4)], # expected - () + ((1,),(2,),(3,),(4,)) + ), + ] + NameMgr.clear() + + for vertex_list, edge_list, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="direct") + res = st._compute_relation_direct_enc() + assert res == st.vertex_to_object(expected) + +def test__compute_relation_edge_enc(): + tests = [ + ( + # 1--2 + # | / + # |/ + # 3--4 + # + # 1 2 3 4 + # -------- + # 1| 1 1 0 0 + # 2| 1 0 1 0 + # 3| 0 1 1 1 + # 4| 0 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # expected + ((1,2),(1,3),(2,3),(3,4)) + ), + ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # 1 2 3 + # ------ + # 1| 1 0 0 + # 2| 0 1 1 + # 3| 1 1 0 + # 4| 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,3),(2,3),(2,4)], + # expected + ((1,3),(2,3),(2,4)) + ), + ] + NameMgr.clear() + + for vertex_list, 
edge_list, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="edge") + res = st._compute_relation_edge_enc() + assert res == st.vertex_to_object(expected) + +def test__compute_relation_clique_enc(): + tests = [ + ( + # 1--2 + # | / + # |/ + # 3--4 + # + # 1 2 3 + # ------ + # 1| 1 1 0 + # 2| 1 0 0 + # 3| 1 1 1 + # 4| 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # ecc + ((1,2,3),(1,3),(3,4)), + # expected + ((1,2,3),(1,3),(3,4)) + ), + ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # 1 2 3 + # ------ + # 1| 1 0 0 + # 2| 0 1 1 + # 3| 1 1 0 + # 4| 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,3),(2,3),(2,4)], + # ecc + ((1,3),(2,3),(2,4)), + # expected + ((1,3),(2,3),(2,4)) + ), + ] + NameMgr.clear() + + for vertex_list, edge_list, ecc, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="clique", ecc=ecc) + res = st._compute_relation_clique_enc() + assert res == st.vertex_to_object(expected) + +def test__compute_relation_vertex_enc(): + tests = [ + ( + # 1--2 + # | / + # |/ + # 3--4 + # + # 1 2 3 4 + # -------- + # 1| 1 1 1 0 + # 2| 0 1 1 0 + # 3| 0 0 1 1 + # 4| 0 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # expected + ((1,),(1,2),(1,2,3),(3,4)) + ), + ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # 1 2 3 4 + # -------- + # 1| 1 0 1 0 + # 2| 0 1 1 1 + # 3| 0 0 1 0 + # 4| 0 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,3),(2,3),(2,4)], + # expected + ((1,),(2,),(1,2,3),(2,4)) + ), + ] + NameMgr.clear() + + for vertex_list, edge_list, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="vertex") + res = st._compute_relation_vertex_enc() + assert res == st.vertex_to_object(expected) + +def test__compute_relation_log_enc(): + tests = [ + ( + # 1|00 + # 2|10 + # 3|01 + # 4|11 + # + # vertex_list + [1,2,3,4], + # expected + ((2,4),(3,4)) ), ( # 1|0 @@ -535,8 +597,461 @@ def test__compute_log_relation(): ((4,2,7,8),(3,2,6,8),(5,7,6,8),(9,)) 
), ] + NameMgr.clear() for vertex_list, expected in tests: st = GrSt(vertex_list, [], encoding="log") - res = st._compute_log_relation() - assert res == expected + res = st._compute_relation_log_enc() + assert res == st.vertex_to_object(expected) + +def test_out_neighborhood(): + tests = [ + ( + # 1--2 + # | / + # |/ + # 3--4 + # + # 3 2 1 4 + # -------- + # 3| 1 1 1 1 + # 2| 0 1 1 0 + # 1| 0 0 1 0 + # 4| 0 0 0 1 + # + # vertex_list + [3,2,1,4], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # out_neighborhood + {1:(), 2:(1,), 3:(2,1,4), 4:()} + ), + ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # 3 2 1 4 + # -------- + # 3| 1 1 1 0 + # 2| 0 1 0 1 + # 1| 0 0 1 0 + # 4| 0 0 0 1 + # + # vertex_list + [3,2,1,4], + # edge_list + [(1,3),(2,3),(2,4)], + # out_neighborhood + {1:(), 2:(4,), 3:(2,1), 4:()} + ), + ] + NameMgr.clear() + for vertex_list, edge_list, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="vertex") + for v in vertex_list: + assert sorted(st._out_neighborhood(st.vertex_to_object(v)))\ + == sorted(st.vertex_to_object(expected[v])), {f"v={v}"} + +def test_in_neighborhood(): + tests = [ + ( + # 1--2 + # | / + # |/ + # 3--4 + # + # 1 2 4 3 + # -------- + # 1| 1 1 0 1 + # 2| 0 1 0 1 + # 4| 0 0 1 1 + # 3| 0 0 0 1 + # + # vertex_list + [1,2,4,3], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # in_neighborhood + {1:(), 2:(1,), 3:(1,2,4), 4:()} + ), + ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # 1 2 3 4 + # -------- + # 1| 1 0 1 0 + # 2| 0 1 1 1 + # 3| 0 0 1 0 + # 4| 0 0 0 1 + # + # vertex_list + [1,2,3,4], + # edge_list + [(1,3),(2,3),(2,4)], + # in_neighborhood + {1:(), 2:(), 3:(1,2), 4:(2,)} + ), + ] + NameMgr.clear() + for vertex_list, edge_list, expected in tests: + st = GrSt(vertex_list, edge_list, encoding="vertex") + for v in vertex_list: + assert sorted(st._in_neighborhood(st.vertex_to_object(v)))\ + == sorted(st.vertex_to_object(expected[v])), f"v={v}" + +def test_lt(): + tests = [ + ( + # 1--2 + # | / + # |/ + # 3--4 + # + # direct-encoding + # 
2 1 4 3 + # -------- + # 2| 1 0 0 0 + # 1| 0 1 0 0 + # 4| 0 0 1 0 + # 3| 0 0 0 1 + # + # log-encoding + # ---- + # 2| 0 0 + # 1| 1 0 + # 4| 0 1 + # 3| 1 1 + # + # vertex-encoding + # 2 1 4 3 + # -------- + # 2| 1 1 0 1 + # 1| 0 1 0 1 + # 4| 0 0 1 1 + # 3| 0 0 0 1 + # + # edge-encoding + # -------- + # 2| 1 0 1 0 + # 1| 1 1 0 0 + # 4| 0 0 0 1 + # 3| 0 1 1 1 + # + # clique-encoding + # ------ + # 2| 1 0 0 + # 1| 1 1 0 + # 4| 0 0 1 + # 3| 1 1 1 + # + # vertex_list + [2,1,4,3], + # edge_list + [(1,2),(1,3),(2,3),(3,4)], + # ecc + ((1,2,3),(1,3),(3,4)), + # expected + {"direct":(2,1,4,3), + "log":(2,1,4,3), + "vertex":(3,1,2,4), + "edge":(1,2,4,3), + "clique":(2,1,4,3)} + ), + ( + # 1 2 + # | /| + # |/ | + # 3 4 + # + # direct-encoding + # 3 2 1 4 + # -------- + # 3| 1 0 0 0 + # 2| 0 1 0 0 + # 1| 0 0 1 0 + # 4| 0 0 0 1 + # + # log-encoding + # ---- + # 3| 0 0 + # 2| 1 0 + # 1| 0 1 + # 4| 1 1 + # + # vertex-encoding + # 3 2 1 4 + # -------- + # 3| 1 1 1 0 + # 2| 0 1 0 1 + # 1| 0 0 1 0 + # 4| 0 0 0 1 + # + # edge-encoding + # ------ + # 3| 1 1 0 + # 2| 0 1 1 + # 1| 1 0 0 + # 4| 0 0 1 + # + # clique-encoding + # ------ + # 3| 1 1 0 + # 2| 0 1 1 + # 1| 1 0 0 + # 4| 0 0 1 + # + # vertex_list + [3,2,1,4], + # edge_list + [(1,3),(2,3),(2,4)], + # ecc + ((1,3),(2,3),(2,4)), + # expected + {"direct":(3,2,1,4), + "log":(3,2,1,4), + "vertex":(1,3,4,2), + "edge":(1,3,4,2), + "clique":(1,3,4,2),} + ), + ] + NameMgr.clear() + for encoding in ["direct", "log", "vertex", "clique"]: + for vertex_list, edge_list, ecc, expected in tests: + _test_lt(encoding, vertex_list, edge_list, ecc, expected[encoding]) + +def _test_lt(encoding, vertex_list, edge_list, ecc, expected): + st = GrSt(vertex_list, edge_list, encoding=encoding, ecc=ecc) + res = st.sorted(st.domain) + # test lt + assert tuple(map(st.object_to_vertex,res)) == expected,\ + f"enc={encoding},V={vertex_list},E={edge_list},expected={expected}" + +def test_adjacent(): + tests = [ + ("direct", False,False), + ("log", False,False), + 
("vertex", False,False), + ("edge", True, True), + ("clique", True, True), + ] + for step in range(10): + for encoding,reject_isolated_vertex,reject_isolated_edge in tests: + _test_adjacent(encoding=encoding, + reject_isolated_vertex=reject_isolated_vertex, + reject_isolated_edge=reject_isolated_edge) + +def _test_adjacent(encoding: str, + reject_isolated_vertex: bool, + reject_isolated_edge: bool): + NameMgr.clear() + n = 5 # number of vertices + m = random.randint(0,(n*(n-1))//2) # number of edges + G = graph.random_graph(n,m, + reject_isolated_vertex=reject_isolated_vertex, + reject_isolated_edge=reject_isolated_edge) + if G is None: + return + vertex_list = list(G.all_vertices()) + edge_list = list(G.all_edges()) + random.shuffle(vertex_list) + random.shuffle(edge_list) + msg = "V=["+",".join(map(str,vertex_list))+"]" + msg += ",E=["+",".join(map(str,edge_list))+"]" + msg += f",encoding={encoding}" + st = GrSt(vertex_list, edge_list, encoding=encoding, msg=msg) + for v in vertex_list: + for w in vertex_list: + vv = st.vertex_to_object(v) + ww = st.vertex_to_object(w) + if (v,w) in edge_list or (w,v) in edge_list: + assert st.adjacent(vv,ww), msg+f"(v,w)={v},{w}" + assert st.adjacent(ww,vv), msg+f"(v,w)={v},{w}" + else: + assert not st.adjacent(vv,ww), msg+f"(v,w)={v},{w}" + assert not st.adjacent(ww,vv), msg+f"(v,w)={v},{w}" + +def test_be_eq(): + tests = [ + ("direct", False,False), + ("log", False,False), + ("vertex", False,False), + ("edge", True, True), + ("clique", True, True), + ] + for step in range(3): + for encoding,reject_isolated_vertex,reject_isolated_edge in tests: + _test_be_eq(encoding=encoding, + reject_isolated_vertex=reject_isolated_vertex, + reject_isolated_edge=reject_isolated_edge) + +def _test_be_eq(encoding: str, + reject_isolated_vertex: bool, + reject_isolated_edge: bool): + NameMgr.clear() + x = NameMgr.lookup_index("x") + y = NameMgr.lookup_index("y") + n = 5 # number of vertices + m = random.randint(0,(n*(n-1))//2) # number of edges 
+ G = graph.random_graph(n,m, + reject_isolated_vertex=reject_isolated_vertex, + reject_isolated_edge=reject_isolated_edge) + if G is None: + return + vertex_list = list(G.all_vertices()) + edge_list = list(G.all_edges()) + random.shuffle(vertex_list) + random.shuffle(edge_list) + msg = "V=["+",".join(map(str,vertex_list))+"]" + msg += ",E=["+",".join(map(str,edge_list))+"]" + msg += f",encoding={encoding}" + st = GrSt(vertex_list, edge_list, encoding=encoding, msg=msg) + for u in st.domain: + for v in st.domain: + f = st.be_eq(u, v) + if u == v: + solver.assert_satisfiable(f,msg=msg) + else: + solver.assert_unsatisfiable(f,msg=msg) + +def test_be_edg(): + tests = [ + ("direct", False,False), + ("log", False,False), + ("vertex", False,False), + ("edge", True, True), + ("clique", True, True), + ] + for step in range(3): + for encoding,reject_isolated_vertex,reject_isolated_edge in tests: + _test_be_edg(encoding=encoding, + reject_isolated_vertex=reject_isolated_vertex, + reject_isolated_edge=reject_isolated_edge) + +def _test_be_edg(encoding: str, + reject_isolated_vertex: bool, + reject_isolated_edge: bool): + NameMgr.clear() + x = NameMgr.lookup_index("x") + y = NameMgr.lookup_index("y") + n = 5 # number of vertices + m = random.randint(0,(n*(n-1))//2) # number of edges + G = graph.random_graph(n,m, + reject_isolated_vertex=reject_isolated_vertex, + reject_isolated_edge=reject_isolated_edge) + if G is None: + return + vertex_list = list(G.all_vertices()) + edge_list = list(G.all_edges()) + random.shuffle(vertex_list) + random.shuffle(edge_list) + msg = "V=["+",".join(map(str,vertex_list))+"]" + msg += ",E=["+",".join(map(str,edge_list))+"]" + msg += f",encoding={encoding}" + st = GrSt(vertex_list, edge_list, encoding=encoding, msg=msg) + for u in st._G.all_vertices(): + for v in st._G.all_vertices(): + pair = f",(u,v)={st.object_to_vertex(u)},{st.object_to_vertex(v)}" + f = st.be_edg(u,v) + f = Prop.land(f, st.compute_auxiliary_constraint(Fog.edg(u,v))) + if (u,v) 
in st._G.all_edges() or (v,u) in st._G.all_edges(): + solver.assert_satisfiable(f,msg=msg+pair) + else: + solver.assert_unsatisfiable(f,msg=msg+pair) + +def test_be_lt(): + tests = [ + ("direct", False,False), + ("log", False,False), + ("vertex", False,False), + ("edge", True, True), + ("clique", True, True), + ] + for step in range(3): + for encoding,reject_isolated_vertex,reject_isolated_edge in tests: + _test_be_lt(encoding=encoding, + reject_isolated_vertex=reject_isolated_vertex, + reject_isolated_edge=reject_isolated_edge) + +def _test_be_lt(encoding: str, + reject_isolated_vertex: bool, + reject_isolated_edge: bool): + NameMgr.clear() + x = NameMgr.lookup_index("x") + y = NameMgr.lookup_index("y") + n = 5 # number of vertices + m = random.randint(0,(n*(n-1))//2) # number of edges + G = graph.random_graph(n,m, + reject_isolated_vertex=reject_isolated_vertex, + reject_isolated_edge=reject_isolated_edge) + if G is None: + return + vertex_list = list(G.all_vertices()) + edge_list = list(G.all_edges()) + random.shuffle(vertex_list) + random.shuffle(edge_list) + msg = "V=["+",".join(map(str,vertex_list))+"]" + msg += ",E=["+",".join(map(str,edge_list))+"]" + msg += f",encoding={encoding}" + st = GrSt(vertex_list, edge_list, encoding=encoding, msg=msg) + for u in st._G.all_vertices(): + for v in st._G.all_vertices(): + f = st.be_lt(u,v) + if st.lt(u,v): + solver.assert_satisfiable(f,msg=msg) + else: + solver.assert_unsatisfiable(f,msg=msg) + +def test_be_domain(): + tests = [ + ("direct", False,False), + ("log", False,False), + ("vertex", False,False), + ("edge", True, True), + ("clique", True, True), + ] + for step in range(3): + for encoding,reject_isolated_vertex,reject_isolated_edge in tests: + _test_be_domain(encoding=encoding, + reject_isolated_vertex=reject_isolated_vertex, + reject_isolated_edge=reject_isolated_edge) + +def _test_be_domain(encoding: str, + reject_isolated_vertex: bool, + reject_isolated_edge: bool): + NameMgr.clear() + x = 
NameMgr.lookup_index("x") # first-order variable + n = 5 # number of vertices + m = random.randint(0,(n*(n-1))//2) # number of edges + G = graph.random_graph(n,m, + reject_isolated_vertex=reject_isolated_vertex, + reject_isolated_edge=reject_isolated_edge) + if G is None: + return + vertex_list = list(G.all_vertices()) + edge_list = list(G.all_edges()) + random.shuffle(vertex_list) + random.shuffle(edge_list) + msg = "V=["+",".join(map(str,vertex_list))+"]" + msg += ",E=["+",".join(map(str,edge_list))+"]" + msg += f",encoding={encoding}" + st = GrSt(vertex_list, edge_list, encoding=encoding, msg=msg) + px = st.get_boolean_var_list(x) + f = st.compute_domain_constraint(x) + for n in range(2**st.code_length): + g = f + code = tuple([i+1 for i in range(st.code_length)\ + if tools.binary_code(n,st.code_length)[i] == 1]) + for i in range(st.code_length): + if i+1 in code: + g = Prop.land(g,Prop.var(px[i])) + else: + g = Prop.land(g,Prop.neg(Prop.var(px[i]))) + if code in st._codes: + solver.assert_satisfiable(g,msg=msg) + else: + solver.assert_unsatisfiable(g,msg=msg) diff --git a/tests/test_name.py b/tests/test_name.py index be97912..d6cfb40 100644 --- a/tests/test_name.py +++ b/tests/test_name.py @@ -10,12 +10,10 @@ def test_name_mgr(): ("x1", True), ("x2", True), ("x3", True), - ("y2@1", True), ("V2", False), ("V3", False), ("y1", True), ("x_1", True), - ("X_1_x@2", False), ] NameMgr.clear() diff --git a/tests/test_prop.py b/tests/test_prop.py index 9c0988b..0e76091 100644 --- a/tests/test_prop.py +++ b/tests/test_prop.py @@ -1,7 +1,11 @@ import random +from pysat.formula import CNF +from pysat.solvers import Solver + from pygplib import Prop, NameMgr, GrSt import pygplib.op as op +from . 
import solver def test_read(): @@ -16,8 +20,8 @@ def test_read(): ("F ", "F"), ("~F", "(~ F)"), ("~ F", "(~ F)"), - ("x@1", "x@1"), - ("~x@1", "(~ x@1)"), + ("x_1", "x_1"), + ("~x_1", "(~ x_1)"), ("~~T", "(~ (~ T))"), ("~(~T)", "(~ (~ T))"), ("((T))", "T"), @@ -50,13 +54,24 @@ def test_read(): res_str = op.to_str(res) assert res_str == expected, f"{res_str}, {expected}" +def gen_form_rand(depth: int = 3) -> Prop: + if depth < 1: + raise ValueError("depth >= 1") + + def gen_atom_rand() -> Prop: + var_name = ["x_1", "y_1", "z_1"] + op = NameMgr.lookup_index(random.choice(var_name)) + + atom_name = ["var", "T", "F"] + name = random.choice(atom_name) + pass def gen_form_rand(depth: int = 3) -> Prop: if depth < 1: raise ValueError("depth >= 1") def gen_atom_rand() -> Prop: - var_name = ["x@1", "y@1", "z@1"] + var_name = ["x_1", "y_1", "z_1"] op = NameMgr.lookup_index(random.choice(var_name)) atom_name = ["var", "T", "F"] @@ -111,65 +126,35 @@ def test_format(): def test_compute_cnf(): tests = [ - (("T",), (0, set())), - (("~ F",), (0, set())), - (("F",), (0, {""})), - (("~ T",), (0, {""})), - (("x@1",), (1, {"1"})), - # f2 <-> -f1 = (-f2 + (-f1)) * (f2 + f1) - (("(~ x@1)",), (1, {"2", "-2,-1", "1,2"})), - # f3 <-> f2 * f1 = ((-f3) + f1) * ((-f3) + f2) * (f3 + (-f1) + (-f2)) - (("x@1 & x@2",), (2, {"3", "-3,1", "-3,2", "-2,-1,3"})), - # f3 <-> f1 + f2 = ((-f3) + f1 + f2) * (f3 + (-f1)) * (f3 + (-f2)) - (("x@1 | x@2",), (2, {"3", "-3,1,2", "-1,3", "-2,3"})), - # f4 <-> f1 -> f2 = f4 <-> (f3 | f2) * (f3 <-> -f1) - # = ((-f4) + f3 + f2) * (f4 + (-f3)) * (f4 + (-f2)) - # * (f3 + f1) * ((-f3) + (-f1)) - (("x@1 -> x@2",), (2, {"4", "-4,2,3", "-3,4", "-2,4", "1,3", "-3,-1"})), - (("T", "T"), (0, set())), - (("T", "~ F"), (0, set())), - (("F", "F"), (0, {""})), - (("x@1", "x@2", "(~ x@1)"), (2, {"1", "2", "3", "-3,-1", "1,3"})), + # formula, its cnf + ("~ (x & y -> z) | x", "x & (x | y) & (x | ~z)"), + ("(x<->z) & ~y | x", "(x | ~z) & (x | ~y)"), + ("~(x<->~z) & ~y", "(~x | y | 
z) & (x | ~y | z) & (x | y |~z) & (~x | ~y | z) & (x | ~y | ~z) & (~x | ~y | ~z)"), + ] + NameMgr.clear() + + for formula, expected in tests: + f = Prop.read(formula) + g = Prop.read(expected) + solver.assert_equivalence(f,g) + +def test_size(): + tests = [ + ("T", 1), + ("~T", 2), + ("~F", 2), + ("x_1", 1), + ("~x_1", 2), + ("~~T", 3), + ("((x1))", 1), + ("x | y", 3), + ("x | x", 3), + ("~(x|~y|x)", 7), ] NameMgr.clear() - for test_tup, expected in tests: - res_tup = [Prop.read(test_str) for test_str in test_tup] - base, naux, cnf = op.compute_cnf(tuple(res_tup)) - tab = [0] * (base + naux + 1) - for cls in cnf: - for lit in cls: - assert abs(lit) <= base + naux - tab[abs(lit)] = 1 - - for i in range(1, naux + 1): - assert ( - tab[base + i] == 1 - ) # all indices from base +1 to base+naux are used for aux vars. - - count = 0 - for i in range(1, base + 1): - if tab[i] == 1: - count += 1 - assert ( - count == expected[0] - ) # some indices from 1 to base are used for propositional variables in test formula. - - # Renumber variable indices in order to compare with the expected result. - new_index = 1 - for i in range(1, base + naux + 1): - if tab[i] == 1: - tab[i] = new_index - new_index += 1 - - # Normalize cnf as a set of strings. - cnf_set = set() - for cls in cnf: - curr = set() - for lit in cls: - x = tab[abs(lit)] - curr.add(x if lit > 0 else -x) - cnf_set.add(",".join(map(str, sorted(curr)))) - - assert cnf_set == expected[1], f"{cnf_set}, {expected[1]}" + for test_str, expected in tests: + res = Prop.read(test_str) + res_size = op.compute_size(res) + assert res_size == expected, f"{op.to_str(res)}, {res_size}, {expected}" diff --git a/tests/test_prop_excp.py b/tests/test_prop_excp.py index 4341705..d112823 100644 --- a/tests/test_prop_excp.py +++ b/tests/test_prop_excp.py @@ -7,17 +7,11 @@ def test_read(): tests = [ - "{x@1}", # Use "(" and ")" in stead of "{" and "}". - "[x@1]", # - "1x@1", # Leading character of symbol name must not be a digit. 
- "_x@1", # Leading character of symbol name must not be a symbol. - "@1", # Leading character of symbol name must not be a symbol. - "x", # Variable must include "@". - "x@", # Variable must include at least one digit after "@". - "x@@1", # Variable must include exactly one "@". - "x@_1", # Variable must not include non-digit characters after "@". - "X@1", # Use lowercase letters. - "x_X@1", # + "{x1}", # Use "(" and ")" in stead of "{" and "}". + "[x1]", # + "1x1", # Leading character of symbol name must not be a digit. + "X_1", # Use lowercase letters. + "x_X_1", # ] NameMgr.clear() @@ -32,13 +26,10 @@ def test_format(): def test_compute_cnf(): tests = [ - 0, # Tuple of Propes must be given as input argument. + 0, # Tuple of Props must be given as input argument. 1, - "x@1", + "x_1", Prop.read("T"), - [Prop.read("T")], - {Prop.read("T")}, - (), # Tuple must be non-empty (Fog.read("T"),), # Expression must be an instance of Prop or its subclass ( Prop.read("T"), diff --git a/tests/test_symrelst.py b/tests/test_symrelst.py index 7b1c76b..3e6a730 100644 --- a/tests/test_symrelst.py +++ b/tests/test_symrelst.py @@ -51,7 +51,7 @@ def test_init(): assert st._object_to_pos(st._pos_to_object(pos)) == pos -def test_get_boolean_var(): +def test_boolean_encoding(): domain = (1,3,2) relation = ((1,2), (3,2),(3,1)) @@ -64,64 +64,25 @@ def test_get_boolean_var(): f"test case include object {obj} outside universe." 
st = SymRelSt(domain, relation) - for obj in domain: - name = NameMgr.lookup_name(obj) - index_list = [NameMgr.lookup_index(f"{name}@{pos+1}") \ - for pos in range(st.code_length)] + var_set = {NameMgr.lookup_index(name) for name in ["x", "y", "z"]} + boolean_var_set = set() + for i in var_set: for pos in range(st.code_length): - assert st.get_boolean_var(obj, pos) == index_list[pos] - -def test_get_symbol_index(): - - domain = (1,3,2) - relation = ((1,2), (3,2),(3,1)) - NameMgr.clear() - max_obj = 4 - universe = set([NameMgr.lookup_index(f"V{i+1}")\ - for i in range(max_obj)]) - for obj in domain: - assert obj in universe, \ - f"test case include object {obj} outside universe." - - st = SymRelSt(domain, relation) - for obj in domain: - name = NameMgr.lookup_name(obj) - index_list = [NameMgr.lookup_index(f"{name}@{pos+1}") \ - for pos in range(st.code_length)] - for i in index_list: - assert st.get_symbol_index(i) == obj - -def test_get_code_pos(): - - domain = (1,3,2) - relation = ((1,2), (3,2),(3,1)) - NameMgr.clear() - max_obj = 4 - universe = set([NameMgr.lookup_index(f"V{i+1}")\ - for i in range(max_obj)]) - for obj in domain: - assert obj in universe, \ - f"test case include object {obj} outside universe." 
- - st = SymRelSt(domain, relation) - for obj in domain: - name = NameMgr.lookup_name(obj) - index_list = [NameMgr.lookup_index(f"{name}@{pos+1}") \ - for pos in range(st.code_length)] - for pos, i in enumerate(index_list): - assert st.get_code_pos(i) == pos - + v = st.get_boolean_var(i, pos) + assert (i,pos) == st.get_variable_position_pair(v) + boolean_var_set.add(v) + assert len(boolean_var_set) == len(var_set)*st.code_length def test_decode_assignment(): tests = [ - ("-x@1,x@2", {"V3"}), - ("x@2,-x@1", {"V3"}), - ("-x@1,x@2,y@1,y@2", {"V3", "V1"}), - ("-x@1,y@1,x@2,y@2", {"V3", "V1"}), - ("-x@1,x@2,-y@1,y@2", {"V3"}), - ("-x@1,x@2,y@1,y@2,-z@1,-z@2", {"V3", "V1", "V4"}), - ("-x@1,x@2,y@1,y@2,-z@1,-z@2,w@1,-w@2", {"V3", "V1", "V4", "V2"}), + (lambda x,y,z,w: f"-{x[1]},{x[2]}", {"V3"}), + (lambda x,y,z,w: f"{x[2]},-{x[1]}", {"V3"}), + (lambda x,y,z,w: f"-{x[1]},{x[2]},{y[1]},{y[2]}", {"V3", "V1"}), + (lambda x,y,z,w: f"-{x[1]},{y[1]},{x[2]},{y[2]}", {"V3", "V1"}), + (lambda x,y,z,w: f"-{x[1]},{x[2]},-{y[1]},{y[2]}", {"V3"}), + (lambda x,y,z,w: f"-{x[1]},{x[2]},{y[1]},{y[2]},-{z[1]},-{z[2]}", {"V3", "V1", "V4"}), + (lambda x,y,z,w: f"-{x[1]},{x[2]},{y[1]},{y[2]},-{z[1]},-{z[2]},{w[1]},-{w[2]}", {"V3", "V1", "V4", "V2"}), ] @@ -141,60 +102,33 @@ def test_decode_assignment(): assert obj in universe, \ f"test case include object {obj} outside universe." 
st = SymRelSt(domain,relation) - - for test_str, expected in tests: + index_x = NameMgr.lookup_index("x") + x = [""] + [NameMgr.lookup_name(i) for i in st.get_boolean_var_list(index_x)] + index_y = NameMgr.lookup_index("y") + y = [""] + [NameMgr.lookup_name(i) for i in st.get_boolean_var_list(index_y)] + index_z = NameMgr.lookup_index("z") + z = [""] + [NameMgr.lookup_name(i) for i in st.get_boolean_var_list(index_z)] + index_w = NameMgr.lookup_index("w") + w = [""] + [NameMgr.lookup_name(i) for i in st.get_boolean_var_list(index_w)] + index_dict = {} + for i in st.get_boolean_var_list(index_x)\ + + st.get_boolean_var_list(index_y)\ + + st.get_boolean_var_list(index_z)\ + + st.get_boolean_var_list(index_w): + index_dict[NameMgr.lookup_name(i)] = i + + for func, expected in tests: assign = [] + test_str = func(x,y,z,w) for name in test_str.split(","): - sign = 1 - name = name.replace(" ", "") + name = name.strip() + if len(name) == 0: + continue if name[0] == "-": - sign = -1 - name = name.replace("-", "") - NameMgr.lookup_index(name.split("@")[0]) - index = NameMgr.lookup_index(name) - assign.append(sign * index) + assign.append( -1 * index_dict[name[1:]] ) + else: + assign.append( index_dict[name] ) res = st.decode_assignment(assign) - elem_set = set() - for key in res: - elem_set.add(NameMgr.lookup_name(res[key])) + elem_set = {NameMgr.lookup_name(res[key])\ + for key in st.decode_assignment(assign)} assert elem_set == expected - -def test_enc_dec(): - - tests = [ - ("x@1", ("x", 1)), - ("x@2", ("x", 2)), - ("Va@1", ("Va", 1)), - ("V1_a@2", ("V1_a", 2)), - ] - - # | 1 2 - # --|---- - # 1 | 1 1 - # 2 | 1 0 - # 3 | 0 1 - # 4 | 0 0 - domain = (1,2,3,4) - relation = ((1,2), (1,3)) - NameMgr.clear() - max_obj = 4 - universe = set([NameMgr.lookup_index(f"V{i+1}")\ - for i in range(max_obj)]) - for obj in domain: - assert obj in universe, \ - f"test case include object {obj} outside universe." 
- st = SymRelSt(domain,relation) - - for test_str, expected in tests: - NameMgr.lookup_index(test_str.split("@")[0]) - index = NameMgr.lookup_index(test_str) - - assert st.exists_symbol(index) - res = st.get_symbol_index(index) - assert NameMgr.lookup_name(res) == expected[0] - - pos = st.get_code_pos(index) - assert pos + 1 == expected[1] - - assert st.exists_boolean_var(res, pos) - assert st.get_boolean_var(res, pos) == index diff --git a/tests/tools.py b/tests/tools.py new file mode 100644 index 0000000..910f4de --- /dev/null +++ b/tests/tools.py @@ -0,0 +1,17 @@ + +def binary_code(n: int, code_length: int): + if code_length <= 0 or not 0 <= n < 2**code_length: + raise ValueError() + res = [] + while True: + if n%2 == 0: + res.append(0) + else: + res.append(1) + if n < 2: + break + n = n/2 + while len(res) < code_length: + res.append(0) + assert len(res) == code_length + return res