From 1c37775ce13e75b69acc3225f67ad58988bc11ae Mon Sep 17 00:00:00 2001
From: statmlben
Date: Tue, 15 Oct 2024 14:06:20 +0800
Subject: [PATCH] add tutorials

---
 doc/source/autoapi/rehline/index.rst    |  14 +-
 doc/source/example.rst                  |   2 +-
 doc/source/index.rst                    |   3 +-
 doc/source/tutorials.rst                | 114 ++++---------
 doc/source/tutorials/ReHLine_ERM.rst    |  26 +++
 doc/source/tutorials/ReHLine_MF.rst     |   2 +
 doc/source/tutorials/ReHLine_manual.rst | 207 ++++++++++++++++++++++++
 doc/source/tutorials/constraint.rst     |  48 ++++++
 doc/source/tutorials/loss.rst           |  72 +++++++++
 rehline/__init__.py                     |   5 +-
 rehline/_class.py                       |   2 +-
 to-do.md                                |  11 ++
 12 files changed, 421 insertions(+), 85 deletions(-)
 create mode 100644 doc/source/tutorials/ReHLine_ERM.rst
 create mode 100644 doc/source/tutorials/ReHLine_MF.rst
 create mode 100644 doc/source/tutorials/ReHLine_manual.rst
 create mode 100644 doc/source/tutorials/constraint.rst
 create mode 100644 doc/source/tutorials/loss.rst
 create mode 100644 to-do.md

diff --git a/doc/source/autoapi/rehline/index.rst b/doc/source/autoapi/rehline/index.rst
index 146db75..7440721 100644
--- a/doc/source/autoapi/rehline/index.rst
+++ b/doc/source/autoapi/rehline/index.rst
@@ -19,6 +19,14 @@ Overview
       - Empirical Risk Minimization (ERM) with a piecewise linear-quadratic (PLQ) objective with a ridge penalty.
 
+.. list-table:: Function
+   :header-rows: 0
+   :widths: auto
+   :class: summarytable
+
+   * - :py:obj:`ReHLine_solver <rehline.ReHLine_solver>`\ (X, U, V, Tau, S, T, A, b, max_iter, tol, shrink, verbose, trace_freq)
+     - \-
+
 
 Classes
 -------
@@ -101,7 +109,7 @@ Classes
     >>> U = -(C*y).reshape(1,-1)
     >>> L = U.shape[0]
     >>> V = (C*np.array(np.ones(n))).reshape(1,-1)
-    >>> clf = ReHLine(loss={'name': 'svm'}, C=C)
+    >>> clf = ReHLine(C=C)
     >>> clf.U, clf.V = U, V
     >>> clf.fit(X=X)
-    >>> print('sol privided by rehline: %s' %clf.coef_)
+    >>> print('sol provided by rehline: %s' %clf.coef_)
@@ -320,6 +328,10 @@ Classes
 
 
+Functions
+---------
+.. py:function:: ReHLine_solver(X, U, V, Tau=np.empty(shape=(0, 0)), S=np.empty(shape=(0, 0)), T=np.empty(shape=(0, 0)), A=np.empty(shape=(0, 0)), b=np.empty(shape=0), max_iter=1000, tol=0.0001, shrink=1, verbose=1, trace_freq=100)
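+
+   Solve the ReHLine optimization directly from raw ``(X, U, V, ...)`` arrays. Below is a minimal call sketch, not part of the generated API text: the SVM parameters follow the :py:obj:`ReHLine <rehline.ReHLine>` example above, and the returned result is assumed to expose the solution as ``beta``.
+
+   .. code-block:: python
+
+      import numpy as np
+      from rehline import ReHLine_solver
+
+      ## random placeholder data (illustrative only)
+      np.random.seed(0)
+      n, d, C = 100, 5, 0.5
+      X = np.random.randn(n, d)
+      y = np.sign(np.random.randn(n))
+
+      ## ReLU parameters encoding the SVM loss
+      U = -(C*y).reshape(1, -1)
+      V = (C*np.ones(n)).reshape(1, -1)
+
+      res = ReHLine_solver(X, U, V)
+      print(res.beta)   ## assumed attribute holding the solution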
+
diff --git a/doc/source/example.rst b/doc/source/example.rst
index d546c00..4b122e6 100644
--- a/doc/source/example.rst
+++ b/doc/source/example.rst
@@ -5,7 +5,7 @@ Example Gallery
 ---------------
 
 .. nblinkgallery::
-   :caption: A few links
+   :caption: Empirical Risk Minimization
    :name: rst-link-gallery
 
    examples/QR.ipynb
diff --git a/doc/source/index.rst b/doc/source/index.rst
index c96156d..d94c92b 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -31,7 +31,7 @@
 
 - Homepage: `https://rehline.github.io/ <https://rehline.github.io/>`_
 - GitHub repo: `https://github.com/softmin/ReHLine-python <https://github.com/softmin/ReHLine-python>`_
-- Documentation: `https://rehline.readthedocs.io <https://rehline.readthedocs.io>`_
+- Documentation: `https://rehline-python.readthedocs.io <https://rehline-python.readthedocs.io>`_
 - PyPi: `https://pypi.org/project/rehline <https://pypi.org/project/rehline>`_
 - Paper: `NeurIPS | 2023 `_
 .. - Open Source: `MIT license `_
@@ -76,5 +76,6 @@
    :hidden:
 
    getting_started
+   tutorials
    example
    benchmark
diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst
index 9bf7915..c22456a 100644
--- a/doc/source/tutorials.rst
+++ b/doc/source/tutorials.rst
@@ -1,7 +1,7 @@
 Tutorials
-=========
+---------
 
-`ReHLine` is designed to address the empirical regularized ReLU-ReHU minimization problem, named *ReHLine optimization*, of the following form:
+`ReHLine` is designed to address the regularized ReLU-ReHU minimization problem, named *ReHLine optimization*, of the following form:
 
 .. math::
@@ -17,87 +17,43 @@ Some popular examples include SVMs with fairness constraints (FairSVM),
 elastic net regularized quantile regression (ElasticQR), and ridge
 regularized Huber minimization (RidgeHuber).
 
-.. image:: ./figs/tab.png
+See the `Manual ReHLine Formulation`_ documentation for more details and examples on converting your problem to the ReHLine formulation.
 
-Solving PLQ ERMs
-----------------
-
-Loss
-****
+Moreover, the following specific classes of formulations can be directly solved by `ReHLine`.
 
-.. code:: python
-
-  # name (str): name of the custom loss function
-  # loss_kwargs: more keys and values for loss parameters
-  loss = {'name': , <**loss_kwargs>}
+- **Empirical Risk Minimization** (ERM) with various loss functions, see `ReHLine: Empirical Risk Minimization`_.
+- **Matrix Factorization** (MF) with various loss functions, see `ReHLine: Matrix Factorization`_.
 
-.. list-table::
-
-  * - **SVM**
-    - | ``loss_name``: 'hinge' / 'svm' / 'SVM'
-      |
-      | *Example:* ``loss = {'name': 'SVM'}``
-
-  * - **Quantile Reg**
-    - | ``loss_name``: 'check' / 'quantile' / 'quantile regression' / 'QR'
-      | ``qt`` (*float*): qt
-      |
-      | *Example:* ``loss = {'name': 'QR', 'qt': 0.25}``
-
-  * - **Smooth SVM**
-    - | ``loss_name``: 'sSVM' / 'smooth SVM' / 'smooth hinge'
-      |
-      | *Example:* ``loss = {'name': 'sSVM'}``
-
-  * - **Huber**
-    - | ``loss_name``: 'huber' / 'Huber'
-      |
-      | *Example:* ``loss = {'name': 'huber'}``
-
-  * - **SVR**
-    - | ``loss_name``: 'SVR' / 'svr'
-      | ``epsilon`` (*float*): 0.1
-      |
-      | *Example:* ``loss = {'name': 'svr', 'epsilon': 0.1}``
-
-constraint
-**********
-
-.. code:: python
-
-  # list of
-  # name (str): name of the custom loss function
-  # loss_kwargs: more keys and values for loss parameters
-  constraint = [{'name': , <**loss_kwargs>}, ...]
+List of Tutorials
+=================
 
 .. list-table::
+   :align: left
+   :widths: 10 10 20
+   :header-rows: 1
+
+   * - Tutorial
+     - | API
+     - | Description
+
+   * - `Manual ReHLine Formulation <./tutorials/ReHLine_manual.rst>`_
+     - | `ReHLine <./autoapi/rehline/index.html#rehline.ReHLine>`_
+     - | ReHLine minimization with manual parameter settings.
+
+   * - `ReHLine: Empirical Risk Minimization <./tutorials/ReHLine_ERM.rst>`_
+     - | `plqERM_Ridge <./autoapi/rehline/index.html#rehline.plqERM_Ridge>`_
+     - | Empirical Risk Minimization (ERM) with a piecewise linear-quadratic (PLQ) objective with a ridge penalty.
+
+   * - `ReHLine: Matrix Factorization <./tutorials/ReHLine_MF.rst>`_
+     - | `plqMF_Ridge <./autoapi/rehline/index.html#rehline.plqMF_Ridge>`_
+     - | Matrix Factorization (MF) with a piecewise linear-quadratic (PLQ) objective with a ridge penalty.
+
+.. toctree::
+   :maxdepth: 2
+   :hidden:
+
+   ./tutorials/ReHLine_manual
+   ./tutorials/ReHLine_ERM
+   ./tutorials/ReHLine_MF
+   ./tutorials/loss
+   ./tutorials/constraint
 
-  * - **SVM**
-    - | ``loss_name``: 'hinge' / 'svm' / 'SVM'
-      |
-      | *Example:* ``loss = {'name': 'SVM'}``
-
-  * - **Quantile Reg**
-    - | ``loss_name``: 'check' / 'quantile' / 'quantile regression' / 'QR'
-      | ``qt`` (*list*): [q1, q2, ... qK]
-      |
-      | *Example:* ``loss = {'name': 'QR', 'qt': [0.25, 0.75]}``
-
-  * - **Smooth SVM**
-    - | ``loss_name``: 'sSVM' / 'smooth SVM' / 'smooth hinge'
-      |
-      | *Example:* ``loss = {'name': 'sSVM'}``
-
-  * - **Huber**
-    - | ``loss_name``: 'huber' / 'Huber'
-      |
-      | *Example:* ``loss = {'name': 'huber'}``
-
-  * - **SVR**
-    - | ``loss_name``: 'SVR' / 'svr'
-      | ``epsilon`` (*float*): 0.1
-      |
-      | *Example:* ``loss = {'name': 'svr', 'epsilon': 0.1}``
-
-manual ReHLine
---------------
\ No newline at end of file
diff --git a/doc/source/tutorials/ReHLine_ERM.rst b/doc/source/tutorials/ReHLine_ERM.rst
new file mode 100644
index 0000000..4262a23
--- /dev/null
+++ b/doc/source/tutorials/ReHLine_ERM.rst
@@ -0,0 +1,26 @@
+ReHLine: Empirical Risk Minimization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The objective function is given by the following PLQ formulation:
+
+.. math::
+
+    \min_{\pmb{\beta} \in \mathbb{R}^d} \sum_{i=1}^n \text{PLQ}(y_i, \mathbf{x}_i^T \pmb{\beta}) + \frac{1}{2} \| \pmb{\beta} \|_2^2, \ \text{ s.t. } \
+    \mathbf{A} \pmb{\beta} + \mathbf{b} \geq \mathbf{0},
+
+where :math:`\text{PLQ}(\cdot, \cdot)` is a convex piecewise linear-quadratic function (see `Loss <./loss.rst>`_ for the built-in loss functions), and :math:`\mathbf{A}` is a :math:`K \times d` matrix and :math:`\mathbf{b}` a :math:`K`-dimensional vector encoding the linear constraints (see `Constraints <./constraint.rst>`_ for more details).
+
+For example, it supports the following loss functions and constraints.
+
+.. image:: ../figs/tab.png
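+
+A minimal usage sketch is given below (the class and the loss dictionary follow the API reference above; the data are random placeholders):
+
+.. code-block:: python
+
+    ## Sketch: quantile regression ERM with a ridge penalty
+    import numpy as np
+    from rehline import plqERM_Ridge
+
+    np.random.seed(0)
+    X = np.random.randn(100, 5)
+    y = X @ np.ones(5) + np.random.randn(100)
+
+    clf = plqERM_Ridge(loss={'name': 'QR', 'qt': 0.25}, C=1.0)
+    clf.fit(X, y)
+    print(clf.coef_)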
+
+Example
+-------
+
+.. nblinkgallery::
+   :caption: Empirical Risk Minimization
+   :name: rst-link-gallery
+
+   ../examples/QR.ipynb
+   ../examples/SVM.ipynb
+   ../examples/FairSVM.ipynb
diff --git a/doc/source/tutorials/ReHLine_MF.rst b/doc/source/tutorials/ReHLine_MF.rst
new file mode 100644
index 0000000..a7c2ec8
--- /dev/null
+++ b/doc/source/tutorials/ReHLine_MF.rst
@@ -0,0 +1,2 @@
+ReHLine: Matrix Factorization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
\ No newline at end of file
diff --git a/doc/source/tutorials/ReHLine_manual.rst b/doc/source/tutorials/ReHLine_manual.rst
new file mode 100644
index 0000000..d1031a7
--- /dev/null
+++ b/doc/source/tutorials/ReHLine_manual.rst
@@ -0,0 +1,207 @@
+Manual ReHLine Formulation
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+`ReHLine` is designed to address the regularized ReLU-ReHU minimization problem, named *ReHLine optimization*, of the following form:
+
+.. math::
+
+    \min_{\mathbf{\beta} \in \mathbb{R}^d} \sum_{i=1}^n \sum_{l=1}^L \text{ReLU}( u_{li} \mathbf{x}_i^\intercal \mathbf{\beta} + v_{li}) + \sum_{i=1}^n \sum_{h=1}^H {\text{ReHU}}_{\tau_{hi}}( s_{hi} \mathbf{x}_i^\intercal \mathbf{\beta} + t_{hi}) + \frac{1}{2} \| \mathbf{\beta} \|_2^2, \ \text{ s.t. } \ \mathbf{A} \mathbf{\beta} + \mathbf{b} \geq \mathbf{0},
+
+where :math:`\mathbf{U} = (u_{li}),\mathbf{V} = (v_{li}) \in \mathbb{R}^{L \times n}`
+and :math:`\mathbf{S} = (s_{hi}),\mathbf{T} = (t_{hi}),\mathbf{\tau} = (\tau_{hi}) \in \mathbb{R}^{H \times n}`
+are the ReLU-ReHU loss parameters, and :math:`(\mathbf{A},\mathbf{b})` are the constraint parameters.
+
+The key to using `ReHLine` to solve a new problem lies in expressing the problem through custom ReHLine parameters; we illustrate this with the following examples. Suppose that we have `X` and `y` as our data.
+
+.. code-block:: python
+
+    ## Data
+    ## X : [n x d]
+    ## y : [n]
+    import numpy as np
+    from rehline import ReHLine
+
+    n, d = X.shape
+    C = 1.0   ## cost parameter used by the examples below (an example value)
+
+.. note::
+
+    Most of the examples below can be directly implemented by `ReHLine: Empirical Risk Minimization <./ReHLine_ERM.rst>`_; we are simply illustrating how to convert the problem to the ReHLine formulation.
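+
+To run the snippets end to end, any arrays of matching shapes will do; for instance (a toy-data sketch using scikit-learn, which `ReHLine` itself does not require):
+
+.. code-block:: python
+
+    ## Toy data (illustrative only); labels in {-1, 1} for the classification examples
+    from sklearn.datasets import make_classification
+    X, y = make_classification(n_samples=100, n_features=5, random_state=0)
+    y = 2*y - 1   ## map {0, 1} -> {-1, 1}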
+
+SVM
+---
+
+SVMs solve the following optimization problem:
+
+.. math::
+
+    \min_{\mathbf{\beta} \in \mathbb{R}^d} \frac{C}{n} \sum_{i=1}^n ( 1 - y_i \mathbf{\beta}^\intercal \mathbf{x}_i )_+ + \frac{1}{2} \| \mathbf{\beta} \|_2^2,
+
+where :math:`\mathbf{x}_i \in \mathbb{R}^d` is a feature vector, and :math:`y_i \in \{-1, 1\}` is a binary label. Note that the SVM can be rewritten as a ReHLine optimization with
+
+.. math::
+
+    \mathbf{U} \leftarrow -C \mathbf{y}^\intercal/n, \quad
+    \mathbf{V} \leftarrow C \mathbf{1}^\intercal_n/n,
+
+where :math:`\mathbf{1}_n = (1, \cdots, 1)^\intercal` is the :math:`n`-length one vector, :math:`\mathbf{X} \in \mathbb{R}^{n \times d}` is the feature matrix, and :math:`\mathbf{y} = (y_1, \cdots, y_n)^\intercal` is the response vector.
+
+The Python implementation is:
+
+.. code-block:: python
+
+    ## SVM ReHLine parameters
+    clf = ReHLine()
+    ## U
+    clf.U = -(C*y).reshape(1,-1)
+    ## V
+    clf.V = (C*np.ones(n)).reshape(1,-1)
+    ## Fit
+    clf.fit(X)
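+
+As a quick sanity check (a sketch, not part of the original example): with no intercept and its cost set to :math:`C/n`, scikit-learn's hinge-loss ``LinearSVC`` minimizes the same objective, so the two solutions should roughly agree up to solver tolerances.
+
+.. code-block:: python
+
+    ## Cross-check against scikit-learn (assumes X, y, C, clf from above)
+    from sklearn.svm import LinearSVC
+
+    sk = LinearSVC(loss='hinge', C=C/n, fit_intercept=False, tol=1e-8, max_iter=1000000)
+    sk.fit(X, y)
+    print(clf.coef_)   ## ReHLine solution
+    print(sk.coef_)    ## should be close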
+
+Smooth SVM
+----------
+
+Smoothed SVMs solve the following optimization problem:
+
+.. math::
+
+    \min_{\mathbf{\beta} \in \mathbb{R}^d} \frac{C}{n} \sum_{i=1}^n V( y_i \mathbf{\beta}^\intercal \mathbf{x}_i ) + \frac{1}{2} \| \mathbf{\beta} \|_2^2,
+
+where :math:`\mathbf{x}_i \in \mathbb{R}^d` is a feature vector, :math:`y_i \in \{-1, 1\}` is a binary label, and :math:`V(\cdot)` is the modified Huber loss or the smoothed hinge loss:
+
+.. math::
+
+    \begin{equation*}
+    V(z) =
+    \begin{cases}
+    \ 0, & z \geq 1, \\
+    \ (1-z)^2/2, & 0 \leq z < 1, \\
+    \ 1/2 - z, & z < 0.
+    \end{cases}
+    \end{equation*}
+
+Smoothed SVM can be rewritten as a ReHLine optimization with
+
+.. math::
+
+    \mathbf{S} \leftarrow -\sqrt{C/n} \mathbf{y}^\intercal, \quad
+    \mathbf{T} \leftarrow \sqrt{C/n} \mathbf{1}^\intercal_n, \quad
+    \mathbf{\tau} \leftarrow \sqrt{C/n} \mathbf{1}^\intercal_n,
+
+where :math:`\mathbf{1}_n = (1, \cdots, 1)^\intercal` is the :math:`n`-length one vector, :math:`\mathbf{X} \in \mathbb{R}^{n \times d}` is the feature matrix, and :math:`\mathbf{y} = (y_1, \cdots, y_n)^\intercal` is the response vector.
+
+The Python implementation is:
+
+.. code-block:: python
+
+    ## sSVM ReHLine parameters
+    clf = ReHLine()
+    ## S
+    clf.S = -(np.sqrt(C/n)*y).reshape(1,-1)
+    ## T
+    clf.T = (np.sqrt(C/n)*np.ones(n)).reshape(1,-1)
+    ## Tau
+    clf.Tau = (np.sqrt(C/n)*np.ones(n)).reshape(1,-1)
+    ## Fit
+    clf.fit(X)
+
+FairSVM
+-------
+
+The SVM with fairness constraints (FairSVM) solves the following optimization problem:
+
+.. math::
+
+    \begin{align}
+    & \min_{\mathbf{\beta} \in \mathbb{R}^d} \frac{C}{n} \sum_{i=1}^n ( 1 - y_i \mathbf{\beta}^\intercal \mathbf{x}_i )_+ + \frac{1}{2} \| \mathbf{\beta} \|_2^2, \nonumber \\
+    \text{subj. to } & \quad \frac{1}{n} \sum_{i=1}^n \mathbf{z}_i \mathbf{\beta}^\intercal \mathbf{x}_i \leq \mathbf{\rho}, \quad \frac{1}{n} \sum_{i=1}^n \mathbf{z}_i \mathbf{\beta}^\intercal \mathbf{x}_i \geq -\mathbf{\rho},
+    \end{align}
+
+where :math:`\mathbf{x}_i \in \mathbb{R}^d` is a feature vector, :math:`y_i \in \{-1, 1\}` is a binary label, and :math:`\mathbf{z}_i` is a collection of centered sensitive features,
+
+.. math::
+
+    \sum_{i=1}^n z_{ij} = 0,
+
+such as gender and/or race. The constraints limit the correlation between the :math:`d_0`-length sensitive features :math:`\mathbf{z}_i \in \mathbb{R}^{d_0}` and the decision function :math:`\mathbf{\beta}^\intercal \mathbf{x}`, and the constants :math:`\mathbf{\rho} \in \mathbb{R}_+^{d_0}` trade off predictive accuracy against fairness. Note that the FairSVM can be rewritten as a ReHLine optimization with
+
+.. math::
+
+    \mathbf{U} \leftarrow -C \mathbf{y}^\intercal/n, \quad
+    \mathbf{V} \leftarrow C \mathbf{1}^\intercal_n/n, \quad
+    \mathbf{A} \leftarrow
+    \begin{pmatrix}
+    \mathbf{Z}^\intercal \mathbf{X} / n \\
+    -\mathbf{Z}^\intercal \mathbf{X} / n
+    \end{pmatrix}, \quad
+    \mathbf{b} \leftarrow
+    \begin{pmatrix}
+    \mathbf{\rho} \\
+    \mathbf{\rho}
+    \end{pmatrix}.
+
+The Python implementation is:
+
+.. code-block:: python
+
+    ## FairSVM ReHLine parameters
+    clf = ReHLine()
+    ## U
+    clf.U = -(C*y).reshape(1,-1)
+    ## V
+    clf.V = (C*np.ones(n)).reshape(1,-1)
+    ## A: use the first column of X as the sensitive feature, with tolerance 0.1
+    ## (the formulation above assumes the sensitive feature is centered)
+    X_sen = X[:,0]
+    tol_sen = 0.1
+    clf.A = np.repeat([X_sen @ X], repeats=[2], axis=0) / n
+    clf.A[1] = -clf.A[1]
+    ## b
+    clf.b = np.array([tol_sen, tol_sen])
+    ## Fit
+    clf.fit(X)
+
+Ridge Huber regression
+----------------------
+
+The ridge regularized Huber minimization (RidgeHuber) solves the following optimization problem:
+
+.. math::
+
+    \min_{\mathbf{\beta}} \frac{1}{n} \sum_{i=1}^n H_\kappa( y_i - \mathbf{x}_i^\intercal \mathbf{\beta} ) + \frac{\lambda}{2} \| \mathbf{\beta} \|_2^2,
+
+where :math:`H_\kappa(\cdot)` is the Huber loss with a given parameter :math:`\kappa`:
+
+.. math::
+
+    H_\kappa(z) =
+    \begin{cases}
+    \ z^2/2, & |z| \leq \kappa, \\
+    \ \kappa ( |z| - \kappa/2 ), & |z| > \kappa.
+    \end{cases}
+
+In this case, the RidgeHuber can be rewritten as a ReHLine optimization with:
+
+.. math::
+
+    \mathbf{S} \leftarrow
+    \begin{pmatrix}
+    -\sqrt{\frac{1}{n\lambda}} \mathbf{1}^\intercal_n \\
+    \sqrt{\frac{1}{n\lambda}} \mathbf{1}^\intercal_n
+    \end{pmatrix}, \quad
+    \mathbf{T} \leftarrow
+    \begin{pmatrix}
+    \sqrt{\frac{1}{n\lambda}} \mathbf{y}^\intercal \\
+    -\sqrt{\frac{1}{n\lambda}} \mathbf{y}^\intercal
+    \end{pmatrix}, \quad
+    \mathbf{\tau} \leftarrow
+    \begin{pmatrix}
+    \kappa \sqrt{\frac{1}{n\lambda}} \mathbf{1}^\intercal_n \\
+    \kappa \sqrt{\frac{1}{n\lambda}} \mathbf{1}^\intercal_n
+    \end{pmatrix}.
+
+The Python implementation is:
+
+.. code-block:: python
+
+    ## Huber ReHLine parameters
+    lam, kappa = 1.0, 1.345   ## example values for the ridge and Huber parameters
+    clf = ReHLine()
+    ## S
+    clf.S = -np.repeat([np.sqrt(1/n/lam)*np.ones(n)], repeats=[2], axis=0)
+    clf.S[1] = -clf.S[1]
+    ## T
+    clf.T = np.repeat([np.sqrt(1/n/lam)*y], repeats=[2], axis=0)
+    clf.T[1] = -clf.T[1]
+    ## Tau
+    clf.Tau = np.repeat([kappa*np.sqrt(1/n/lam)*np.ones(n)], repeats=[2], axis=0)
+    ## Fit
+    clf.fit(X)
\ No newline at end of file
diff --git a/doc/source/tutorials/constraint.rst b/doc/source/tutorials/constraint.rst
new file mode 100644
index 0000000..817c7e3
--- /dev/null
+++ b/doc/source/tutorials/constraint.rst
@@ -0,0 +1,48 @@
+Constraint
+**********
+
+Supported linear constraints in ReHLine are listed in the table below.
+
+Usage
+-----
+
+.. code:: python
+
+    # list of
+    # name (str): name of the constraint
+    # constraint_kwargs: more keys and values for constraint parameters
+    constraint = [{'name': <constraint_name>, <**constraint_kwargs>}, ...]
+
+.. list-table::
+   :align: left
+   :widths: 5 20 15
+   :header-rows: 1
+
+   * - Constraint
+     - | Args
+     - | Example
+
+   * - **nonnegative**
+     - | ``name``: 'nonnegative' or '>=0'
+     - | ``constraint=[{'name': '>=0'}]``
+
+   * - **fair**
+     - | ``name``: 'fair' or 'fairness'
+       | ``X_sen``: 2d array [n x p] for sensitive attributes
+       | ``tol_sen``: 1d array [p] of tolerance for fairness
+     - | ``constraint=[{'name': 'fair', 'X_sen': X_sen, 'tol_sen': tol_sen}]``
+
+   * - **custom**
+     - | ``name``: 'custom'
+       | ``A``: 2d array [K x d] of linear constraint coefficients
+       | ``b``: 1d array [K] of constraint intercepts
+     - | ``constraint=[{'name': 'custom', 'A': A, 'b': b}]``
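+
+For instance, a constraint list is passed to the ERM class together with a loss (a minimal sketch; `plqERM_Ridge` is the documented ERM class, and the data arrays are random placeholders):
+
+.. code-block:: python
+
+    ## Sketch: SVM ERM with a fairness constraint (illustrative values)
+    import numpy as np
+    from rehline import plqERM_Ridge
+
+    np.random.seed(0)
+    X = np.random.randn(100, 5)
+    y = np.sign(np.random.randn(100))
+
+    X_sen = X[:, :1]              ## [n x 1] sensitive attribute
+    tol_sen = np.array([0.1])     ## [1] fairness tolerance
+
+    clf = plqERM_Ridge(
+        loss={'name': 'svm'},
+        constraint=[{'name': 'fair', 'X_sen': X_sen, 'tol_sen': tol_sen}],
+        C=1.0,
+    )
+    clf.fit(X, y)
+    print(clf.coef_)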
+
+Related Examples
+----------------
+
+.. nblinkgallery::
+   :caption: Constraints
+   :name: rst-link-gallery
+
+   ../examples/FairSVM.ipynb
diff --git a/doc/source/tutorials/loss.rst b/doc/source/tutorials/loss.rst
new file mode 100644
index 0000000..967a388
--- /dev/null
+++ b/doc/source/tutorials/loss.rst
@@ -0,0 +1,72 @@
+Loss
+****
+
+Supported loss functions in ReHLine are listed in the table below.
+
+Usage
+-----
+
+.. code:: python
+
+    # name (str): name of the custom loss function
+    # loss_kwargs: more keys and values for loss parameters
+    loss = {'name': <loss_name>, <**loss_kwargs>}
+
+
+Classification loss
+~~~~~~~~~~~~~~~~~~~
+
+.. list-table::
+   :align: left
+   :widths: 5 20 15
+   :header-rows: 1
+
+   * - Loss
+     - | Args
+     - | Example
+
+   * - **SVM**
+     - | ``name``: 'hinge' / 'svm' / 'SVM'
+     - | ``loss={'name': 'SVM'}``
+
+   * - **Smooth SVM**
+     - | ``name``: 'sSVM' / 'smooth SVM' / 'smooth hinge'
+     - | ``loss={'name': 'sSVM'}``
+
+
+Regression loss
+~~~~~~~~~~~~~~~
+
+.. list-table::
+   :align: left
+   :widths: 5 20 15
+   :header-rows: 1
+
+   * - Loss
+     - | Args
+     - | Example
+
+   * - **Quantile Reg**
+     - | ``name``: 'check' / 'quantile' / 'QR'
+       | ``qt`` (*float*): quantile level to be estimated
+     - | ``loss={'name': 'QR', 'qt': 0.25}``
+
+   * - **Huber**
+     - | ``name``: 'huber' / 'Huber'
+     - | ``loss={'name': 'huber'}``
+
+   * - **SVR**
+     - | ``name``: 'SVR' / 'svr'
+       | ``epsilon`` (*float*): width of the epsilon-insensitive band, e.g., 0.1
+     - | ``loss={'name': 'svr', 'epsilon': 0.1}``
+
+Related Examples
+----------------
+
+.. nblinkgallery::
+   :caption: Losses
+   :name: rst-link-gallery
+
+   ../examples/QR.ipynb
+   ../examples/SVM.ipynb
diff --git a/rehline/__init__.py b/rehline/__init__.py
index 5d08c90..101ec69 100644
--- a/rehline/__init__.py
+++ b/rehline/__init__.py
@@ -5,9 +5,10 @@
 from ._data import make_fair_classification
 from ._internal import rehline_internal, rehline_result
 
-__all__ = ("_BaseReHLine",
+__all__ = ("ReHLine_solver",
+           "_BaseReHLine",
            "ReHLine",
            "plqERM_Ridge",
            "_make_loss_rehline_param",
-           "_make_constraint_rehline_param"
-           "make_fair_classification")
\ No newline at end of file
+           "_make_constraint_rehline_param",
+           "make_fair_classification")
diff --git a/rehline/_class.py b/rehline/_class.py
index 76ea4d9..55d9fe4 100644
--- a/rehline/_class.py
+++ b/rehline/_class.py
@@ -91,7 +91,7 @@ class ReHLine(_BaseReHLine, BaseEstimator):
     >>> U = -(C*y).reshape(1,-1)
     >>> L = U.shape[0]
     >>> V = (C*np.array(np.ones(n))).reshape(1,-1)
-    >>> clf = ReHLine(loss={'name': 'svm'}, C=C)
+    >>> clf = ReHLine(C=C)
     >>> clf.U, clf.V = U, V
     >>> clf.fit(X=X)
-    >>> print('sol privided by rehline: %s' %clf.coef_)
+    >>> print('sol provided by rehline: %s' %clf.coef_)
diff --git a/to-do.md b/to-do.md
new file mode 100644
index 0000000..bbc86f5
--- /dev/null
+++ b/to-do.md
@@ -0,0 +1,11 @@
+# To-do list
+
+## Class
+- [ ] Elastic Net ERM
+
+## Loss
+- [ ] MAE
+- [ ] TV
+
+## Constraint
+- [ ] Monotonic constraints
\ No newline at end of file