pylppinv 1.0.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pylppinv-1.0.3/LICENSE ADDED
@@ -0,0 +1,7 @@
1
+ Copyright 2021 econcz
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4
+
5
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
+
7
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,193 @@
1
+ Metadata-Version: 2.4
2
+ Name: pylppinv
3
+ Version: 1.0.3
4
+ Summary: Linear Programming via Pseudoinverse Estimation
5
+ Author-email: The Economist <29724411+econcz@users.noreply.github.com>
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://github.com/econcz/pylppinv
8
+ Project-URL: Bug Tracker, https://github.com/econcz/pylppinv/issues
9
+ Keywords: linear-programming,convex-optimization,least-squares,generalized-inverse,regularization
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Programming Language :: Python :: 3.10
12
+ Classifier: Programming Language :: Python :: 3.11
13
+ Classifier: Programming Language :: Python :: 3.12
14
+ Classifier: Operating System :: OS Independent
15
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
16
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
17
+ Requires-Python: >=3.10
18
+ Description-Content-Type: text/markdown
19
+ License-File: LICENSE
20
+ Requires-Dist: numpy>=1.24
21
+ Requires-Dist: pyclsp>=1.1.2
22
+ Dynamic: license-file
23
+
24
+ # Linear Programming via Pseudoinverse Estimation
25
+
26
+ The **Linear Programming via Pseudoinverse Estimation (LPPinv)** is a two-stage estimation method that reformulates linear programs as structured least-squares problems. Based on the [Convex Least Squares Programming (CLSP)](https://pypi.org/project/pyclsp/ "Convex Least Squares Programming") framework, LPPinv solves linear inequality, equality, and bound constraints by (1) constructing a canonical constraint system and computing a pseudoinverse projection, followed by (2) a convex-programming correction stage to refine the solution under additional regularization (e.g., Lasso, Ridge, or Elastic Net).
27
+ LPPinv is intended for **underdetermined** and **ill-posed** linear problems, for which standard solvers fail.
28
+
29
+ ## Installation
30
+
31
+ ```bash
32
+ pip install pylppinv
33
+ ```
34
+
35
+ ## Quick Example
36
+
37
+ ```python
38
+ from lppinv import lppinv
39
+ import numpy as np
40
+
41
+ # Define inequality constraints A_ub @ x <= b_ub
42
+ A_ub = [
43
+ [1, 1],
44
+ [2, 1]
45
+ ]
46
+ b_ub = [5, 8]
47
+
48
+ # Define equality constraints A_eq @ x = b_eq
49
+ A_eq = [
50
+ [1, -1]
51
+ ]
52
+ b_eq = [1]
53
+
54
+ # Define bounds for x1 and x2
55
+ bounds = [(0, 5), (0, None)]
56
+
57
+ # Run the LP via CLSP
58
+ result = lppinv(
59
+ c = [1, 1], # not used in CLSP but included for compatibility
60
+ A_ub = A_ub,
61
+ b_ub = b_ub,
62
+ A_eq = A_eq,
63
+ b_eq = b_eq,
64
+ bounds = bounds
65
+ )
66
+
67
+ # Output solution
68
+ print("Solution vector (x):")
69
+ print(result.x.flatten())
70
+ ```
71
+
72
+ ## User Reference
73
+
74
+ For comprehensive information on the estimator’s capabilities, advanced configuration options, and implementation details, please refer to the [pyclsp module](https://pypi.org/project/pyclsp/ "Convex Least Squares Programming"), on which LPPinv is based.
75
+
76
+ **LPPINV Parameters:**
77
+
78
+ `c` : *array_like* of shape *(p,)*, optional
79
+ Objective function coefficients. Accepted for API parity; not used by CLSP.
80
+
81
+ `A_ub` : *array_like* of shape *(i, p)*, optional
82
+ Matrix for inequality constraints `A_ub @ x <= b_ub`.
83
+
84
+ `b_ub` : *array_like* of shape *(i,)*, optional
85
+ Right-hand side vector for inequality constraints.
86
+
87
+ `A_eq` : *array_like* of shape *(j, p)*, optional
88
+ Matrix for equality constraints `A_eq @ x = b_eq`.
89
+
90
+ `b_eq` : *array_like* of shape *(j,)*, optional
91
+ Right-hand side vector for equality constraints.
92
+
93
+ `bounds` : *sequence* of *(low, high)*, optional
94
+ Bounds on variables. If a single tuple **(low, high)** is given, it is applied to all variables. If None, defaults to *(0, None)* for each variable (non-negativity).
95
+
96
+ Please note that either `A_ub` and `b_ub` or `A_eq` and `b_eq` must be provided.
97
+
98
+ **CLSP Parameters:**
99
+
100
+ `r` : *int*, default = *1*
101
+ Number of refinement iterations for the pseudoinverse-based estimator.
102
+
103
+ `Z` : *np.ndarray* or *None*
104
+ A symmetric idempotent matrix (projector) defining the subspace for Bott–Duffin pseudoinversion. If *None*, the identity matrix is used, reducing the Bott–Duffin inverse to the Moore–Penrose case.
105
+
106
+ `tolerance` : *float*, default = *square root of machine epsilon*
107
+ Convergence tolerance for NRMSE change between refinement iterations.
108
+
109
+ `iteration_limit` : *int*, default = *50*
110
+ Maximum number of iterations allowed in the refinement loop.
111
+
112
+ `final` : *bool*, default = *True*
113
+ If *True*, a convex programming problem is solved to refine `zhat`. The resulting solution `z` minimizes a weighted L1/L2 norm around `zhat` subject to `Az = b`.
114
+
115
+ `alpha` : *float*, default = *1.0*
116
+ Regularization parameter (weight) in the final convex program:
117
+ - `α = 0`: Lasso (L1 norm)
118
+ - `α = 1`: Tikhonov Regularization/Ridge (L2 norm)
119
+ - `0 < α < 1`: Elastic Net
120
+
121
+ `*args`, `**kwargs` : optional
122
+ CVXPY arguments passed to the CVXPY solver.
123
+
124
+ **Returns:**
125
+ *self*
126
+
127
+ `self.A` : *np.ndarray*
128
+ Design matrix `A` = [`C` | `S`; `M` | `Q`], where `Q` is either a zero matrix or *S_residual*.
129
+
130
+ `self.b` : *np.ndarray*
131
+ Vector of the right-hand side.
132
+
133
+ `self.zhat` : *np.ndarray*
134
+ Vector of the first-step estimate.
135
+
136
+ `self.r` : *int*
137
+ Number of refinement iterations performed in the first step.
138
+
139
+ `self.z` : *np.ndarray*
140
+ Vector of the final solution. If the second step is disabled, it equals `self.zhat`.
141
+
142
+ `self.x` : *np.ndarray*
143
+ `m × p` matrix or vector containing the variable component of `z`.
144
+
145
+ `self.y` : *np.ndarray*
146
+ Vector containing the slack component of `z`.
147
+
148
+ `self.kappaC` : *float*
149
+ Spectral κ() for *C_canon*.
150
+
151
+ `self.kappaB` : *float*
152
+ Spectral κ() for *B* = *C_canon^+ A*.
153
+
154
+ `self.kappaA` : *float*
155
+ Spectral κ() for `A`.
156
+
157
+ `self.rmsa` : *float*
158
+ Total root mean square alignment (RMSA).
159
+
160
+ `self.r2_partial` : *float*
161
+ R² for the `M` block in `A`.
162
+
163
+ `self.nrmse` : *float*
164
+ Root mean square error calculated from `A` and normalized by standard deviation (NRMSE).
165
+
166
+ `self.nrmse_partial` : *float*
167
+ Root mean square error from the `M` block in `A` and normalized by standard deviation (NRMSE).
168
+
169
+ `self.z_lower` : *np.ndarray*
170
+ Lower bound of the diagnostic interval (confidence band) based on κ(`A`).
171
+
172
+ `self.z_upper` : *np.ndarray*
173
+ Upper bound of the diagnostic interval (confidence band) based on κ(`A`).
174
+
175
+ `self.x_lower` : *np.ndarray*
176
+ Lower bound of the diagnostic interval (confidence band) based on κ(`A`).
177
+
178
+ `self.x_upper` : *np.ndarray*
179
+ Upper bound of the diagnostic interval (confidence band) based on κ(`A`).
180
+
181
+ `self.y_lower` : *np.ndarray*
182
+ Lower bound of the diagnostic interval (confidence band) based on κ(`A`).
183
+
184
+ `self.y_upper` : *np.ndarray*
185
+ Upper bound of the diagnostic interval (confidence band) based on κ(`A`).
186
+
187
+ ## Bibliography
188
+
189
+ To be added.
190
+
191
+ ## License
192
+
193
+ MIT License — see the [LICENSE](LICENSE) file.
@@ -0,0 +1,170 @@
1
+ # Linear Programming via Pseudoinverse Estimation
2
+
3
+ The **Linear Programming via Pseudoinverse Estimation (LPPinv)** is a two-stage estimation method that reformulates linear programs as structured least-squares problems. Based on the [Convex Least Squares Programming (CLSP)](https://pypi.org/project/pyclsp/ "Convex Least Squares Programming") framework, LPPinv solves linear inequality, equality, and bound constraints by (1) constructing a canonical constraint system and computing a pseudoinverse projection, followed by (2) a convex-programming correction stage to refine the solution under additional regularization (e.g., Lasso, Ridge, or Elastic Net).
4
+ LPPinv is intended for **underdetermined** and **ill-posed** linear problems, for which standard solvers fail.
5
+
6
+ ## Installation
7
+
8
+ ```bash
9
+ pip install pylppinv
10
+ ```
11
+
12
+ ## Quick Example
13
+
14
+ ```python
15
+ from lppinv import lppinv
16
+ import numpy as np
17
+
18
+ # Define inequality constraints A_ub @ x <= b_ub
19
+ A_ub = [
20
+ [1, 1],
21
+ [2, 1]
22
+ ]
23
+ b_ub = [5, 8]
24
+
25
+ # Define equality constraints A_eq @ x = b_eq
26
+ A_eq = [
27
+ [1, -1]
28
+ ]
29
+ b_eq = [1]
30
+
31
+ # Define bounds for x1 and x2
32
+ bounds = [(0, 5), (0, None)]
33
+
34
+ # Run the LP via CLSP
35
+ result = lppinv(
36
+ c = [1, 1], # not used in CLSP but included for compatibility
37
+ A_ub = A_ub,
38
+ b_ub = b_ub,
39
+ A_eq = A_eq,
40
+ b_eq = b_eq,
41
+ bounds = bounds
42
+ )
43
+
44
+ # Output solution
45
+ print("Solution vector (x):")
46
+ print(result.x.flatten())
47
+ ```
48
+
49
+ ## User Reference
50
+
51
+ For comprehensive information on the estimator’s capabilities, advanced configuration options, and implementation details, please refer to the [pyclsp module](https://pypi.org/project/pyclsp/ "Convex Least Squares Programming"), on which LPPinv is based.
52
+
53
+ **LPPINV Parameters:**
54
+
55
+ `c` : *array_like* of shape *(p,)*, optional
56
+ Objective function coefficients. Accepted for API parity; not used by CLSP.
57
+
58
+ `A_ub` : *array_like* of shape *(i, p)*, optional
59
+ Matrix for inequality constraints `A_ub @ x <= b_ub`.
60
+
61
+ `b_ub` : *array_like* of shape *(i,)*, optional
62
+ Right-hand side vector for inequality constraints.
63
+
64
+ `A_eq` : *array_like* of shape *(j, p)*, optional
65
+ Matrix for equality constraints `A_eq @ x = b_eq`.
66
+
67
+ `b_eq` : *array_like* of shape *(j,)*, optional
68
+ Right-hand side vector for equality constraints.
69
+
70
+ `bounds` : *sequence* of *(low, high)*, optional
71
+ Bounds on variables. If a single tuple **(low, high)** is given, it is applied to all variables. If None, defaults to *(0, None)* for each variable (non-negativity).
72
+
73
+ Please note that either `A_ub` and `b_ub` or `A_eq` and `b_eq` must be provided.
74
+
75
+ **CLSP Parameters:**
76
+
77
+ `r` : *int*, default = *1*
78
+ Number of refinement iterations for the pseudoinverse-based estimator.
79
+
80
+ `Z` : *np.ndarray* or *None*
81
+ A symmetric idempotent matrix (projector) defining the subspace for Bott–Duffin pseudoinversion. If *None*, the identity matrix is used, reducing the Bott–Duffin inverse to the Moore–Penrose case.
82
+
83
+ `tolerance` : *float*, default = *square root of machine epsilon*
84
+ Convergence tolerance for NRMSE change between refinement iterations.
85
+
86
+ `iteration_limit` : *int*, default = *50*
87
+ Maximum number of iterations allowed in the refinement loop.
88
+
89
+ `final` : *bool*, default = *True*
90
+ If *True*, a convex programming problem is solved to refine `zhat`. The resulting solution `z` minimizes a weighted L1/L2 norm around `zhat` subject to `Az = b`.
91
+
92
+ `alpha` : *float*, default = *1.0*
93
+ Regularization parameter (weight) in the final convex program:
94
+ - `α = 0`: Lasso (L1 norm)
95
+ - `α = 1`: Tikhonov Regularization/Ridge (L2 norm)
96
+ - `0 < α < 1`: Elastic Net
97
+
98
+ `*args`, `**kwargs` : optional
99
+ CVXPY arguments passed to the CVXPY solver.
100
+
101
+ **Returns:**
102
+ *self*
103
+
104
+ `self.A` : *np.ndarray*
105
+ Design matrix `A` = [`C` | `S`; `M` | `Q`], where `Q` is either a zero matrix or *S_residual*.
106
+
107
+ `self.b` : *np.ndarray*
108
+ Vector of the right-hand side.
109
+
110
+ `self.zhat` : *np.ndarray*
111
+ Vector of the first-step estimate.
112
+
113
+ `self.r` : *int*
114
+ Number of refinement iterations performed in the first step.
115
+
116
+ `self.z` : *np.ndarray*
117
+ Vector of the final solution. If the second step is disabled, it equals `self.zhat`.
118
+
119
+ `self.x` : *np.ndarray*
120
+ `m × p` matrix or vector containing the variable component of `z`.
121
+
122
+ `self.y` : *np.ndarray*
123
+ Vector containing the slack component of `z`.
124
+
125
+ `self.kappaC` : *float*
126
+ Spectral κ() for *C_canon*.
127
+
128
+ `self.kappaB` : *float*
129
+ Spectral κ() for *B* = *C_canon^+ A*.
130
+
131
+ `self.kappaA` : *float*
132
+ Spectral κ() for `A`.
133
+
134
+ `self.rmsa` : *float*
135
+ Total root mean square alignment (RMSA).
136
+
137
+ `self.r2_partial` : *float*
138
+ R² for the `M` block in `A`.
139
+
140
+ `self.nrmse` : *float*
141
+ Mean square error calculated from `A` and normalized by standard deviation (NRMSE).
142
+
143
+ `self.nrmse_partial` : *float*
144
+ Mean square error from the `M` block in `A` and normalized by standard deviation (NRMSE).
145
+
146
+ `self.z_lower` : *np.ndarray*
147
+ Lower bound of the diagnostic interval (confidence band) based on κ(`A`).
148
+
149
+ `self.z_upper` : *np.ndarray*
150
+ Upper bound of the diagnostic interval (confidence band) based on κ(`A`).
151
+
152
+ `self.x_lower` : *np.ndarray*
153
+ Lower bound of the diagnostic interval (confidence band) based on κ(`A`).
154
+
155
+ `self.x_upper` : *np.ndarray*
156
+ Upper bound of the diagnostic interval (confidence band) based on κ(`A`).
157
+
158
+ `self.y_lower` : *np.ndarray*
159
+ Lower bound of the diagnostic interval (confidence band) based on κ(`A`).
160
+
161
+ `self.y_upper` : *np.ndarray*
162
+ Upper bound of the diagnostic interval (confidence band) based on κ(`A`).
163
+
164
+ ## Bibliography
165
+
166
+ To be added.
167
+
168
+ ## License
169
+
170
+ MIT License — see the [LICENSE](LICENSE) file.
@@ -0,0 +1,8 @@
1
+ __version__ = "1.0.3"
2
+
3
+ from .lppinv import lppinv
4
+
5
+ __all__ = [
6
+ "lppinv",
7
+ "__version__"
8
+ ]
@@ -0,0 +1,158 @@
1
+ import numpy as np
2
+ from typing import Sequence
3
+ from collections.abc import Sequence as sq
4
+ from clsp import CLSP
5
+
6
+ class LPPinvInputError(Exception):
7
+ """
8
+ Exception class for LPPinv-related input errors.
9
+
10
+ Represents internal failures in Linear Programming via Pseudoinversion
11
+ routines due to malformed or incompatible input. Supports structured
12
+ messaging and optional diagnostic augmentation.
13
+
14
+ Parameters
15
+ ----------
16
+ message : str, optional
17
+ Description of the error. Defaults to a generic LPPinv message.
18
+
19
+ code : int or str, optional
20
+ Optional error code or identifier for downstream handling.
21
+
22
+ Usage
23
+ -----
24
+ raise LPPinvInputError("A_ub and b_ub are incompatible", code=201)
25
+ """
26
+
27
+ def __init__(self, message: str = "An error occurred in LPPinv",
28
+ code: int | str | None = None):
29
+ self.message = message
30
+ self.code = code
31
+ full_message = f"{message} (Code: {code})" if code is not None \
32
+ else message
33
+ super().__init__(full_message)
34
+
35
+ def __str__(self) -> str:
36
+ return self.message if self.code is None \
37
+ else f"{self.message} [Code: {self.code}]"
38
+
39
+ def as_dict(self) -> dict:
40
+ """
41
+ Return the error as a dictionary for structured logging or JSON output.
42
+ """
43
+ return {"error": self.message, "code": self.code}
44
+
45
+ def lppinv(
46
+ c: Sequence[float] | None = None,
47
+ A_ub: Sequence[Sequence[float]] | None = None,
48
+ b_ub: Sequence[float] | None = None,
49
+ A_eq: Sequence[Sequence[float]] | None = None,
50
+ b_eq: Sequence[float] | None = None,
51
+ bounds: list[tuple[float | None, float | None]] | \
52
+ tuple[float | None, float | None] | None = None,
53
+ *args, **kwargs
54
+ ) -> CLSP:
55
+ """
56
+ Solve a linear program via Convex Least Squares Programming (CLSP)
57
+ estimator.
58
+
59
+ Parameters (SciPy linprog-compatible)
60
+ -------------------------------------
61
+ c : array_like of shape (p,), optional
62
+ Objective function coefficients. Accepted for API parity; not used
63
+ by CLSP.
64
+ A_ub : array_like of shape (i, p), optional
65
+ Matrix for inequality constraints A_ub @ x <= b_ub.
66
+ b_ub : array_like of shape (i,), optional
67
+ Right-hand side vector for inequality constraints.
68
+ A_eq : array_like of shape (j, p), optional
69
+ Matrix for equality constraints A_eq @ x = b_eq.
70
+ b_eq : array_like of shape (j,), optional
71
+ Right-hand side vector for equality constraints.
72
+ bounds : sequence of (low, high), optional
73
+ Bounds on variables. If a single tuple (low, high) is given, it is
74
+ applied to all variables. If None, defaults to (0, None) for each
75
+ variable (non-negativity).
76
+
77
+ Returns
78
+ -------
79
+ CLSP
80
+ The fitted CLSP object. Consult https://pypi.org/project/pyclsp/
81
+ """
82
+ # assert conformability of constraint sets (A_ub, b_ub) and (A_eq, b_eq)
83
+ if not ((A_ub is not None and b_ub is not None) or
84
+ (A_eq is not None and b_eq is not None)):
85
+ raise LPPinvInputError("At least one complete constraint set "
86
+ "(A_ub, b_ub) or (A_eq, b_eq) must be "
87
+ "provided.")
88
+ if A_ub is not None:
89
+ A_ub = np.asarray(A_ub, dtype=np.float64)
90
+ if A_ub.ndim == 1:
91
+ A_ub = A_ub.reshape(1, -1)
92
+ b_ub = np.asarray(b_ub, dtype=np.float64).reshape(-1, 1)
93
+ if A_ub.shape[0] != b_ub.shape[0]:
94
+ raise LPPinvInputError(f"A_ub and b_ub must have the same number "
95
+ f"of rows: "
96
+ f"{A_ub.shape[0]} vs {b_ub.shape[0]}")
97
+ n_vars = A_ub.shape[1] # number of variables
98
+ if A_eq is not None:
99
+ A_eq = np.asarray(A_eq, dtype=np.float64)
100
+ if A_eq.ndim == 1:
101
+ A_eq = A_eq.reshape(1, -1)
102
+ b_eq = np.asarray(b_eq, dtype=np.float64).reshape(-1, 1)
103
+ if A_eq.shape[0] != b_eq.shape[0]:
104
+ raise LPPinvInputError(f"A_eq and b_eq must have the same number "
105
+ f"of rows: "
106
+ f"{A_eq.shape[0]} vs {b_eq.shape[0]}")
107
+ n_vars = A_eq.shape[1] # number of variables
108
+
109
+ # (b) Construct the right-hand side vector
110
+ if bounds is None: # normalize bounds
111
+ bounds = (0, None)
112
+ if isinstance(bounds, tuple):
113
+ bounds = [bounds] * n_vars # replicate (low, high)
114
+ elif isinstance(bounds, sq):
115
+ if len(bounds) > 1 and len(bounds) != n_vars:
116
+ raise LPPinvInputError(f"Bounds length {len(bounds)} does not "
117
+ f"match number of variables {n_vars}.")
118
+ elif len(bounds) == 1:
119
+ bounds = bounds * n_vars # replicate (low, high)
120
+ if any((l is not None and l < 0) or
121
+ (h is not None and h < 0) for l, h in bounds):
122
+ raise LPPinvInputError("Negative lower or upper bounds are not "
123
+ "allowed in linear programs.")
124
+ b = np.empty((0, 1))
125
+ if b_ub is not None:
126
+ b = b_ub
127
+ if b_eq is not None:
128
+ b = np.vstack([b, b_eq])
129
+ b = np.vstack([b, np.array([l if l is not None else 0 for l, h
130
+ in bounds]).reshape(-1, 1),
131
+ np.array([h if h is not None else np.inf for l, h
132
+ in bounds]).reshape(-1, 1)])
133
+
134
+ # (C), (S) Construct conformable blocks for the design matrix A
135
+ if A_ub is not None and A_eq is not None:
136
+ if A_ub.shape[1] != A_eq.shape[1]:
137
+ raise LPPinvInputError(f"A_ub and A_eq must have the same number "
138
+ f"of columns: "
139
+ f"{A_ub.shape[1]} vs {A_eq.shape[1]}")
140
+ C = np.empty((0, n_vars))
141
+ S = np.empty((0, 0))
142
+ if A_ub is not None:
143
+ C = A_ub
144
+ S = np.eye(A_ub.shape[0])
145
+ if A_eq is not None:
146
+ C = np.vstack([C, A_eq])
147
+ S = np.vstack([S, np.zeros((A_eq.shape[0], S.shape[1]))])
148
+ C = np.vstack([C, np.tile(np.eye(n_vars), (2,1))])
149
+ S = np.vstack([np.hstack([S, np.zeros((S.shape[0], n_vars))]), np.tile(
150
+ np.hstack([np.zeros((n_vars, S.shape[1])), np.eye(n_vars)]),
151
+ (2,1))])
152
+
153
+ # perform estimation and return the result
154
+ finite_rows = np.isfinite(b).ravel() # drop rows with np.inf
155
+ return CLSP().solve(problem='general', C=C[finite_rows, :],
156
+ S=S[finite_rows, :],
157
+ b=b[finite_rows],
158
+ *args, **kwargs)
@@ -0,0 +1,193 @@
1
+ Metadata-Version: 2.4
2
+ Name: pylppinv
3
+ Version: 1.0.3
4
+ Summary: Linear Programming via Pseudoinverse Estimation
5
+ Author-email: The Economist <29724411+econcz@users.noreply.github.com>
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://github.com/econcz/pylppinv
8
+ Project-URL: Bug Tracker, https://github.com/econcz/pylppinv/issues
9
+ Keywords: linear-programming,convex-optimization,least-squares,generalized-inverse,regularization
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Programming Language :: Python :: 3.10
12
+ Classifier: Programming Language :: Python :: 3.11
13
+ Classifier: Programming Language :: Python :: 3.12
14
+ Classifier: Operating System :: OS Independent
15
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
16
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
17
+ Requires-Python: >=3.10
18
+ Description-Content-Type: text/markdown
19
+ License-File: LICENSE
20
+ Requires-Dist: numpy>=1.24
21
+ Requires-Dist: pyclsp>=1.1.2
22
+ Dynamic: license-file
23
+
24
+ # Linear Programming via Pseudoinverse Estimation
25
+
26
+ The **Linear Programming via Pseudoinverse Estimation (LPPinv)** is a two-stage estimation method that reformulates linear programs as structured least-squares problems. Based on the [Convex Least Squares Programming (CLSP)](https://pypi.org/project/pyclsp/ "Convex Least Squares Programming") framework, LPPinv solves linear inequality, equality, and bound constraints by (1) constructing a canonical constraint system and computing a pseudoinverse projection, followed by (2) a convex-programming correction stage to refine the solution under additional regularization (e.g., Lasso, Ridge, or Elastic Net).
27
+ LPPinv is intended for **underdetermined** and **ill-posed** linear problems, for which standard solvers fail.
28
+
29
+ ## Installation
30
+
31
+ ```bash
32
+ pip install pylppinv
33
+ ```
34
+
35
+ ## Quick Example
36
+
37
+ ```python
38
+ from lppinv import lppinv
39
+ import numpy as np
40
+
41
+ # Define inequality constraints A_ub @ x <= b_ub
42
+ A_ub = [
43
+ [1, 1],
44
+ [2, 1]
45
+ ]
46
+ b_ub = [5, 8]
47
+
48
+ # Define equality constraints A_eq @ x = b_eq
49
+ A_eq = [
50
+ [1, -1]
51
+ ]
52
+ b_eq = [1]
53
+
54
+ # Define bounds for x1 and x2
55
+ bounds = [(0, 5), (0, None)]
56
+
57
+ # Run the LP via CLSP
58
+ result = lppinv(
59
+ c = [1, 1], # not used in CLSP but included for compatibility
60
+ A_ub = A_ub,
61
+ b_ub = b_ub,
62
+ A_eq = A_eq,
63
+ b_eq = b_eq,
64
+ bounds = bounds
65
+ )
66
+
67
+ # Output solution
68
+ print("Solution vector (x):")
69
+ print(result.x.flatten())
70
+ ```
71
+
72
+ ## User Reference
73
+
74
+ For comprehensive information on the estimator’s capabilities, advanced configuration options, and implementation details, please refer to the [pyclsp module](https://pypi.org/project/pyclsp/ "Convex Least Squares Programming"), on which LPPinv is based.
75
+
76
+ **LPPINV Parameters:**
77
+
78
+ `c` : *array_like* of shape *(p,)*, optional
79
+ Objective function coefficients. Accepted for API parity; not used by CLSP.
80
+
81
+ `A_ub` : *array_like* of shape *(i, p)*, optional
82
+ Matrix for inequality constraints `A_ub @ x <= b_ub`.
83
+
84
+ `b_ub` : *array_like* of shape *(i,)*, optional
85
+ Right-hand side vector for inequality constraints.
86
+
87
+ `A_eq` : *array_like* of shape *(j, p)*, optional
88
+ Matrix for equality constraints `A_eq @ x = b_eq`.
89
+
90
+ `b_eq` : *array_like* of shape *(j,)*, optional
91
+ Right-hand side vector for equality constraints.
92
+
93
+ `bounds` : *sequence* of *(low, high)*, optional
94
+ Bounds on variables. If a single tuple **(low, high)** is given, it is applied to all variables. If None, defaults to *(0, None)* for each variable (non-negativity).
95
+
96
+ Please note that either `A_ub` and `b_ub` or `A_eq` and `b_eq` must be provided.
97
+
98
+ **CLSP Parameters:**
99
+
100
+ `r` : *int*, default = *1*
101
+ Number of refinement iterations for the pseudoinverse-based estimator.
102
+
103
+ `Z` : *np.ndarray* or *None*
104
+ A symmetric idempotent matrix (projector) defining the subspace for Bott–Duffin pseudoinversion. If *None*, the identity matrix is used, reducing the Bott–Duffin inverse to the Moore–Penrose case.
105
+
106
+ `tolerance` : *float*, default = *square root of machine epsilon*
107
+ Convergence tolerance for NRMSE change between refinement iterations.
108
+
109
+ `iteration_limit` : *int*, default = *50*
110
+ Maximum number of iterations allowed in the refinement loop.
111
+
112
+ `final` : *bool*, default = *True*
113
+ If *True*, a convex programming problem is solved to refine `zhat`. The resulting solution `z` minimizes a weighted L1/L2 norm around `zhat` subject to `Az = b`.
114
+
115
+ `alpha` : *float*, default = *1.0*
116
+ Regularization parameter (weight) in the final convex program:
117
+ - `α = 0`: Lasso (L1 norm)
118
+ - `α = 1`: Tikhonov Regularization/Ridge (L2 norm)
119
+ - `0 < α < 1`: Elastic Net
120
+
121
+ `*args`, `**kwargs` : optional
122
+ CVXPY arguments passed to the CVXPY solver.
123
+
124
+ **Returns:**
125
+ *self*
126
+
127
+ `self.A` : *np.ndarray*
128
+ Design matrix `A` = [`C` | `S`; `M` | `Q`], where `Q` is either a zero matrix or *S_residual*.
129
+
130
+ `self.b` : *np.ndarray*
131
+ Vector of the right-hand side.
132
+
133
+ `self.zhat` : *np.ndarray*
134
+ Vector of the first-step estimate.
135
+
136
+ `self.r` : *int*
137
+ Number of refinement iterations performed in the first step.
138
+
139
+ `self.z` : *np.ndarray*
140
+ Vector of the final solution. If the second step is disabled, it equals `self.zhat`.
141
+
142
+ `self.x` : *np.ndarray*
143
+ `m × p` matrix or vector containing the variable component of `z`.
144
+
145
+ `self.y` : *np.ndarray*
146
+ Vector containing the slack component of `z`.
147
+
148
+ `self.kappaC` : *float*
149
+ Spectral κ() for *C_canon*.
150
+
151
+ `self.kappaB` : *float*
152
+ Spectral κ() for *B* = *C_canon^+ A*.
153
+
154
+ `self.kappaA` : *float*
155
+ Spectral κ() for `A`.
156
+
157
+ `self.rmsa` : *float*
158
+ Total root mean square alignment (RMSA).
159
+
160
+ `self.r2_partial` : *float*
161
+ R² for the `M` block in `A`.
162
+
163
+ `self.nrmse` : *float*
164
+ Root mean square error calculated from `A` and normalized by standard deviation (NRMSE).
165
+
166
+ `self.nrmse_partial` : *float*
167
+ Root mean square error from the `M` block in `A` and normalized by standard deviation (NRMSE).
168
+
169
+ `self.z_lower` : *np.ndarray*
170
+ Lower bound of the diagnostic interval (confidence band) based on κ(`A`).
171
+
172
+ `self.z_upper` : *np.ndarray*
173
+ Upper bound of the diagnostic interval (confidence band) based on κ(`A`).
174
+
175
+ `self.x_lower` : *np.ndarray*
176
+ Lower bound of the diagnostic interval (confidence band) based on κ(`A`).
177
+
178
+ `self.x_upper` : *np.ndarray*
179
+ Upper bound of the diagnostic interval (confidence band) based on κ(`A`).
180
+
181
+ `self.y_lower` : *np.ndarray*
182
+ Lower bound of the diagnostic interval (confidence band) based on κ(`A`).
183
+
184
+ `self.y_upper` : *np.ndarray*
185
+ Upper bound of the diagnostic interval (confidence band) based on κ(`A`).
186
+
187
+ ## Bibliography
188
+
189
+ To be added.
190
+
191
+ ## License
192
+
193
+ MIT License — see the [LICENSE](LICENSE) file.
@@ -0,0 +1,10 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ lppinv/__init__.py
5
+ lppinv/lppinv.py
6
+ pylppinv.egg-info/PKG-INFO
7
+ pylppinv.egg-info/SOURCES.txt
8
+ pylppinv.egg-info/dependency_links.txt
9
+ pylppinv.egg-info/requires.txt
10
+ pylppinv.egg-info/top_level.txt
@@ -0,0 +1,2 @@
1
+ numpy>=1.24
2
+ pyclsp>=1.1.2
@@ -0,0 +1 @@
1
+ lppinv
@@ -0,0 +1,42 @@
1
+ [project]
2
+ name = "pylppinv"
3
+ version = "1.0.3"
4
+ description = "Linear Programming via Pseudoinverse Estimation"
5
+ readme = "README.md"
6
+ requires-python = ">=3.10"
7
+ license = "MIT"
8
+ license-files = ["LICENSE"]
9
+ authors = [
10
+ { name = "The Economist", email = "29724411+econcz@users.noreply.github.com" }
11
+ ]
12
+ keywords = [
13
+ "linear-programming",
14
+ "convex-optimization",
15
+ "least-squares",
16
+ "generalized-inverse",
17
+ "regularization"
18
+ ]
19
+ classifiers = [
20
+ "Programming Language :: Python :: 3",
21
+ "Programming Language :: Python :: 3.10",
22
+ "Programming Language :: Python :: 3.11",
23
+ "Programming Language :: Python :: 3.12",
24
+ "Operating System :: OS Independent",
25
+ "Topic :: Scientific/Engineering :: Mathematics",
26
+ "Topic :: Scientific/Engineering :: Information Analysis"
27
+ ]
28
+ dependencies = [
29
+ "numpy>=1.24",
30
+ "pyclsp>=1.1.2"
31
+ ]
32
+
33
+ [project.urls]
34
+ Homepage = "https://github.com/econcz/pylppinv"
35
+ "Bug Tracker" = "https://github.com/econcz/pylppinv/issues"
36
+
37
+ [build-system]
38
+ requires = ["setuptools>=77", "wheel"]
39
+ build-backend = "setuptools.build_meta"
40
+
41
+ [tool.setuptools]
42
+ packages = ["lppinv"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+