pyoframe 0.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyoframe/__init__.py +15 -0
- pyoframe/_arithmetic.py +228 -0
- pyoframe/constants.py +280 -0
- pyoframe/constraints.py +911 -0
- pyoframe/io.py +149 -0
- pyoframe/io_mappers.py +206 -0
- pyoframe/model.py +92 -0
- pyoframe/model_element.py +116 -0
- pyoframe/monkey_patch.py +54 -0
- pyoframe/objective.py +42 -0
- pyoframe/solvers.py +186 -0
- pyoframe/util.py +271 -0
- pyoframe/variables.py +193 -0
- pyoframe-0.0.4.dist-info/LICENSE +23 -0
- pyoframe-0.0.4.dist-info/METADATA +58 -0
- pyoframe-0.0.4.dist-info/RECORD +18 -0
- pyoframe-0.0.4.dist-info/WHEEL +5 -0
- pyoframe-0.0.4.dist-info/top_level.txt +1 -0
pyoframe/solvers.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Code to interface with various solvers
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from abc import abstractmethod, ABC
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Optional, Union, TYPE_CHECKING
|
|
8
|
+
|
|
9
|
+
import polars as pl
|
|
10
|
+
|
|
11
|
+
from pyoframe.constants import (
|
|
12
|
+
DUAL_KEY,
|
|
13
|
+
NAME_COL,
|
|
14
|
+
SOLUTION_KEY,
|
|
15
|
+
Result,
|
|
16
|
+
Solution,
|
|
17
|
+
Status,
|
|
18
|
+
)
|
|
19
|
+
import contextlib
|
|
20
|
+
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
|
|
23
|
+
if TYPE_CHECKING: # pragma: no cover
|
|
24
|
+
from pyoframe.model import Model
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def solve(m: "Model", solver, **kwargs):
    """
    Solve the model with the requested solver and write results back onto it.

    Parameters:
        m:
            The model to solve.
        solver:
            The name of the solver backend. Currently only "gurobi" is supported.
        **kwargs:
            Forwarded to the solver backend's `solve` method.

    Returns the solver's `Result`. On success, the objective value, variable
    solutions, and (when available) constraint duals are stored on the model.
    """
    # Guard clause: fail fast on unknown solvers before doing any work.
    if solver != "gurobi":
        raise ValueError(f"Solver {solver} not recognized or supported.")
    result = GurobiSolver().solve(m, **kwargs)

    solution = result.solution
    if solution is not None:
        m.objective.value = solution.objective

        for variable in m.variables:
            variable.solution = solution.primal

        # Duals are not always available (e.g. for integer programs).
        if solution.dual is not None:
            for constraint in m.constraints:
                constraint.dual = solution.dual

    return result
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class Solver(ABC):
    """Abstract interface that every solver backend must implement."""

    @abstractmethod
    def solve(self, model, directory: Optional[Path] = None, **kwargs) -> Result: ...
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class FileBasedSolver(Solver):
    """
    Solver backend that communicates with the solver by writing the model to an
    .lp file and then reading the solver's results back.
    """

    def solve(
        self,
        model: "Model",
        directory: Optional[Union[Path, str]] = None,
        use_var_names=None,
        **kwargs,
    ) -> Result:
        """
        Write `model` to an .lp file, solve it, and map the solution back to
        the model's own variable and constraint names.

        Parameters:
            model:
                The model to solve.
            directory:
                If given, the .lp file is written here (created if missing).
                Otherwise `model.to_file` chooses a location.
            use_var_names:
                Forwarded to `model.to_file`.
            **kwargs:
                Forwarded to `solve_from_lp`.
        """
        problem_file = None
        if directory is not None:
            directory = Path(directory)  # Path(Path(...)) is a no-op, so this is safe.
            directory.mkdir(parents=True, exist_ok=True)
            # Bug fix: `filename` was previously computed but never used; the
            # problem file now actually carries the model's name.
            filename = model.name if model.name is not None else "pyoframe-problem"
            problem_file = directory / f"{filename}.lp"
        problem_file = model.to_file(problem_file, use_var_names=use_var_names)
        assert model.io_mappers is not None

        results = self.solve_from_lp(problem_file, **kwargs)

        # Solver results use the names written to the .lp file; undo the
        # mapping so they match the model's variables and constraints.
        if results.solution is not None:
            results.solution.primal = model.io_mappers.var_map.undo(
                results.solution.primal
            )
            if results.solution.dual is not None:
                results.solution.dual = model.io_mappers.const_map.undo(
                    results.solution.dual
                )

        return results

    @abstractmethod
    def solve_from_lp(self, problem_file: Path, **kwargs) -> Result: ...
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class GurobiSolver(FileBasedSolver):
    """Solver backend that reads the .lp problem file with gurobipy."""

    def solve_from_lp(
        self,
        problem_fn,
        log_fn=None,
        warmstart_fn=None,
        basis_fn=None,
        solution_file=None,
        env=None,
        **solver_options,
    ) -> Result:
        """
        Solve a linear problem using the gurobi solver.

        This function communicates with gurobi using the gurobipy package.

        Parameters:
            problem_fn:
                Path of the .lp problem file to read.
            log_fn:
                If given, Gurobi writes its log to this path.
            warmstart_fn:
                If given, this warmstart file is read before optimizing.
            basis_fn:
                If given, the model basis is written here after optimizing (best effort).
            solution_file:
                If given and the solve succeeded, the solution is written here.
            env:
                An existing gurobipy.Env to reuse. If None, a temporary
                environment is created and disposed of when done.
            **solver_options:
                Passed to Gurobi via `setParam` as key-value pairs.
        """
        import gurobipy

        # see https://www.gurobi.com/documentation/10.0/refman/optimization_status_codes.html
        CONDITION_MAP = {
            1: "unknown",
            2: "optimal",
            3: "infeasible",
            4: "infeasible_or_unbounded",
            5: "unbounded",
            6: "other",
            7: "iteration_limit",
            8: "terminated_by_limit",
            9: "time_limit",
            10: "optimal",
            11: "user_interrupt",
            12: "other",
            13: "suboptimal",
            14: "unknown",
            15: "terminated_by_limit",
            16: "internal_solver_error",
            17: "internal_solver_error",
        }

        with contextlib.ExitStack() as stack:
            if env is None:
                # The ExitStack disposes of the temporary environment on exit.
                env = stack.enter_context(gurobipy.Env())

            m = gurobipy.read(path_to_str(problem_fn), env=env)
            # Bug fix: `solver_options` is a **kwargs dict and is never None;
            # iterating directly handles the empty case correctly.
            for key, value in solver_options.items():
                m.setParam(key, value)
            if log_fn is not None:
                m.setParam("logfile", path_to_str(log_fn))
            if warmstart_fn:
                m.read(path_to_str(warmstart_fn))

            m.optimize()

            if basis_fn:
                try:
                    m.write(path_to_str(basis_fn))
                except gurobipy.GurobiError as err:
                    # Best effort only: failing to store the basis is not fatal.
                    # Bug fix: the message previously used logging-style "%s"
                    # arguments that print() does not interpolate.
                    print(f"No model basis stored. Raised error: {err}")

            condition = m.status
            termination_condition = CONDITION_MAP.get(condition, condition)
            status = Status.from_termination_condition(termination_condition)

            if status.is_ok:
                if solution_file:
                    m.write(path_to_str(solution_file))

                objective = m.ObjVal
                vars = m.getVars()
                sol = pl.DataFrame(
                    {
                        NAME_COL: m.getAttr("VarName", vars),
                        SOLUTION_KEY: m.getAttr("X", vars),
                    }
                )

                constraints = m.getConstrs()
                try:
                    dual = pl.DataFrame(
                        {
                            DUAL_KEY: m.getAttr("Pi", constraints),
                            NAME_COL: m.getAttr("ConstrName", constraints),
                        }
                    )
                except gurobipy.GurobiError:
                    # Duals are unavailable for some problem classes (e.g. MIPs).
                    dual = None

                solution = Solution(sol, dual, objective)
            else:
                solution = None

            return Result(status, solution, m)
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
def path_to_str(path: Union[Path, str]) -> str:
    """
    Convert a pathlib.Path to a string.

    A `Path` is resolved to an absolute path; a plain string is returned unchanged.
    """
    if isinstance(path, Path):
        return str(path.resolve())
    return path
|
pyoframe/util.py
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
"""
|
|
2
|
+
File containing utility functions and classes.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from abc import abstractmethod, ABC
|
|
6
|
+
from collections import defaultdict
|
|
7
|
+
from typing import Any, Dict, Iterable, Optional, Union
|
|
8
|
+
|
|
9
|
+
import polars as pl
|
|
10
|
+
import pandas as pd
|
|
11
|
+
|
|
12
|
+
from pyoframe.constants import COEF_KEY, CONST_TERM, RESERVED_COL_KEYS, VAR_KEY
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class IdCounterMixin(ABC):
    """
    Provides a method that assigns a unique ID to each row in a DataFrame.
    IDs start at 1 and go up consecutively. No zero ID is assigned since it is reserved for the constant variable term.
    IDs are only unique for the subclass since different subclasses have different counters.
    """

    # Keys are the subclass names and values are the next unassigned ID.
    # Shared across all subclasses: one dict, one entry per subclass name.
    _id_counters: Dict[str, int] = defaultdict(lambda: 1)

    @classmethod
    def _reset_counters(cls):
        """
        Resets all the ID counters.
        This function is called before every unit test to reset the code state.

        NOTE(review): calling this on a subclass rebinds `_id_counters` on that
        subclass only, shadowing (not resetting) the shared dict — presumably it
        is always invoked on `IdCounterMixin` itself; confirm at call sites.
        """
        cls._id_counters = defaultdict(lambda: 1)

    def _assign_ids(self, df: pl.DataFrame) -> pl.DataFrame:
        """
        Returns `df` with an added ID column (named per `get_id_column_name()`)
        holding the next batch of unique consecutive IDs for this subclass.
        """
        cls_name = self.__class__.__name__
        cur_count = self._id_counters[cls_name]
        id_col_name = self.get_id_column_name()

        if df.height == 0:
            # Zero-row frame: still add the column so the schema stays consistent.
            df = df.with_columns(pl.lit(cur_count).alias(id_col_name))
        else:
            df = df.with_columns(
                pl.int_range(cur_count, cur_count + pl.len()).alias(id_col_name)
            )
        df = df.with_columns(pl.col(id_col_name).cast(pl.UInt32))
        # Advance the shared counter by the number of IDs just handed out.
        self._id_counters[cls_name] += df.height
        return df

    @classmethod
    @abstractmethod
    def get_id_column_name(cls) -> str:
        """
        Returns the name of the column containing the IDs.
        """

    @property
    @abstractmethod
    def ids(self) -> pl.DataFrame:
        """
        Returns a dataframe with the IDs and any other relevant columns (i.e. the dimension columns).
        """
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def get_obj_repr(obj: object, _props: Iterable[str] = (), **kwargs):
    """
    Helper function to generate __repr__ strings for classes. See usage for examples.

    `_props` are attribute names read off `obj`; `kwargs` are extra key-value
    pairs. Entries whose value is None are omitted from the output.
    """
    prop_parts = []
    for prop in _props:
        val = getattr(obj, prop)
        if val is not None:
            prop_parts.append(f"{prop}={val}")
    prop_section = " ".join(prop_parts)
    if prop_section:
        prop_section += " "
    kw_section = " ".join(f"{key}={val}" for key, val in kwargs.items() if val is not None)
    return f"<{obj.__class__.__name__} {prop_section}{kw_section}>"
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def parse_inputs_as_iterable(
    *inputs: Union[Any, Iterable[Any]],
) -> Iterable[Any]:
    """
    Converts a parameter *x: Any | Iterable[Any] to a single Iterable[Any] object.
    This is helpful to support these two ways of passing arguments:
        - foo([1, 2, 3])
        - foo(1, 2, 3)

    Inspired from the polars library.
    """
    if len(inputs) == 0:
        return []

    # A single iterable argument: its elements are the actual inputs.
    if len(inputs) == 1 and _is_iterable(inputs[0]):
        return inputs[0]

    return inputs
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def _is_iterable(input: Union[Any, Iterable[Any]]) -> bool:
    # Inspired from the polars library.
    # Scalar-like types (strings, frames, dicts, ...) are iterable in Python
    # but should be treated as single inputs, not collections of inputs.
    if not isinstance(input, Iterable):
        return False
    scalar_like = (
        str,
        bytes,
        pl.DataFrame,
        pl.Series,
        pd.DataFrame,
        pd.Series,
        pd.Index,
        dict,
    )
    return not isinstance(input, scalar_like)
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def concat_dimensions(
    df: pl.DataFrame,
    prefix: Optional[str] = None,
    keep_dims: bool = True,
    ignore_columns=RESERVED_COL_KEYS,
    replace_spaces: bool = True,
    to_col: str = "concated_dim",
) -> pl.DataFrame:
    """
    Returns a new DataFrame with the column 'concated_dim'. Reserved columns are ignored.

    Parameters:
        df : pl.DataFrame
            The input DataFrame.
        prefix : str, optional
            The prefix to be added to the concated dimension.
        keep_dims : bool, optional
            If True, the original dimensions are kept in the new DataFrame.
        ignore_columns : optional
            Columns excluded from the concatenation (defaults to the reserved internal columns).
        replace_spaces : bool, optional
            If True, spaces in the result are replaced with underscores.
        to_col : str, optional
            Name of the output column.

    Examples:
        >>> import polars as pl
        >>> df = pl.DataFrame(
        ...     {
        ...         "dim1": [1, 2, 3, 1, 2, 3],
        ...         "dim2": ["Y", "Y", "Y", "N", "N", "N"],
        ...     }
        ... )
        >>> concat_dimensions(df)
        shape: (6, 3)
        ┌──────┬──────┬──────────────┐
        │ dim1 ┆ dim2 ┆ concated_dim │
        │ ---  ┆ ---  ┆ ---          │
        │ i64  ┆ str  ┆ str          │
        ╞══════╪══════╪══════════════╡
        │ 1    ┆ Y    ┆ [1,Y]        │
        │ 2    ┆ Y    ┆ [2,Y]        │
        │ 3    ┆ Y    ┆ [3,Y]        │
        │ 1    ┆ N    ┆ [1,N]        │
        │ 2    ┆ N    ┆ [2,N]        │
        │ 3    ┆ N    ┆ [3,N]        │
        └──────┴──────┴──────────────┘
        >>> concat_dimensions(df, prefix="x")
        shape: (6, 3)
        ┌──────┬──────┬──────────────┐
        │ dim1 ┆ dim2 ┆ concated_dim │
        │ ---  ┆ ---  ┆ ---          │
        │ i64  ┆ str  ┆ str          │
        ╞══════╪══════╪══════════════╡
        │ 1    ┆ Y    ┆ x[1,Y]       │
        │ 2    ┆ Y    ┆ x[2,Y]       │
        │ 3    ┆ Y    ┆ x[3,Y]       │
        │ 1    ┆ N    ┆ x[1,N]       │
        │ 2    ┆ N    ┆ x[2,N]       │
        │ 3    ┆ N    ┆ x[3,N]       │
        └──────┴──────┴──────────────┘
        >>> concat_dimensions(df, keep_dims=False)
        shape: (6, 1)
        ┌──────────────┐
        │ concated_dim │
        │ ---          │
        │ str          │
        ╞══════════════╡
        │ [1,Y]        │
        │ [2,Y]        │
        │ [3,Y]        │
        │ [1,N]        │
        │ [2,N]        │
        │ [3,N]        │
        └──────────────┘
        >>> # Properly handles cases with no dimensions and ignores reserved columns
        >>> df = pl.DataFrame({VAR_KEY: [1, 2]})
        >>> concat_dimensions(df, prefix="x")
        shape: (2, 2)
        ┌───────────────┬──────────────┐
        │ __variable_id ┆ concated_dim │
        │ ---           ┆ ---          │
        │ i64           ┆ str          │
        ╞═══════════════╪══════════════╡
        │ 1             ┆ x            │
        │ 2             ┆ x            │
        └───────────────┴──────────────┘
    """
    if prefix is None:
        prefix = ""
    # Dimension columns are everything except the reserved/internal columns.
    dimensions = [col for col in df.columns if col not in ignore_columns]
    if dimensions:
        # Build "prefix[dim1,dim2,...]" from the dimension values of each row.
        query = pl.concat_str(
            pl.lit(prefix + "["),
            pl.concat_str(*dimensions, separator=","),
            pl.lit("]"),
        )
    else:
        # No dimensions: the concatenated name is just the prefix itself.
        query = pl.lit(prefix)

    df = df.with_columns(query.alias(to_col))

    if replace_spaces:
        # Presumably to keep the generated names safe for file formats that
        # treat whitespace as a separator (e.g. .lp files) — confirm.
        df = df.with_columns(pl.col(to_col).str.replace_all(" ", "_"))

    if not keep_dims:
        # With no dimensions this is drop() with no arguments, i.e. a no-op.
        df = df.drop(*dimensions)

    return df
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
def cast_coef_to_string(
    df: pl.DataFrame, column_name: str = COEF_KEY, drop_ones=True, float_precision=None
) -> pl.DataFrame:
    """
    Converts a numeric coefficient column to signed strings ("+2", "-3", ...)
    suitable for writing expressions to text formats.

    Parameters:
        df : pl.DataFrame
            The input DataFrame.
        column_name : str, optional
            The name of the column to be casted.
        drop_ones : bool, optional
            If True, 1s are replaced with an empty string for non-constant terms.
        float_precision : int, optional
            The number of decimal places to round the coefficients to. If None, no rounding is done (so Polars' default precision is used).
    Examples:
        >>> import polars as pl
        >>> df = pl.DataFrame({"x": [1.0, -2.0, 1.0, 4.0], VAR_KEY: [1, 2, 0, 4]})
        >>> cast_coef_to_string(df, "x")
        shape: (4, 2)
        ┌─────┬───────────────┐
        │ x   ┆ __variable_id │
        │ --- ┆ ---           │
        │ str ┆ i64           │
        ╞═════╪═══════════════╡
        │ +   ┆ 1             │
        │ -2  ┆ 2             │
        │ +1  ┆ 0             │
        │ +4  ┆ 4             │
        └─────┴───────────────┘
    """
    # Split the coefficient into a magnitude and a "+"/"-" sign column; the
    # sign is prepended again at the end.
    df = df.with_columns(
        pl.col(column_name).abs(),
        _sign=pl.when(pl.col(column_name) < 0).then(pl.lit("-")).otherwise(pl.lit("+")),
    )

    if float_precision is not None:
        df = df.with_columns(pl.col(column_name).round(float_precision))

    # Whole numbers are printed without a decimal point (e.g. "2", not "2.0").
    df = df.with_columns(
        pl.when(pl.col(column_name) == pl.col(column_name).floor())
        .then(pl.col(column_name).cast(pl.Int64).cast(pl.String))
        .otherwise(pl.col(column_name).cast(pl.String))
        .alias(column_name)
    )

    if drop_ones:
        # A coefficient of 1 is implicit for variable terms ("+x" not "+1 x"),
        # but the constant term (VAR_KEY == CONST_TERM) must keep its "1".
        condition = pl.col(column_name) == str(1)
        if VAR_KEY in df.columns:
            condition = condition & (pl.col(VAR_KEY) != CONST_TERM)
        df = df.with_columns(
            pl.when(condition)
            .then(pl.lit(""))
            .otherwise(pl.col(column_name))
            .alias(column_name)
        )
    else:
        df = df.with_columns(pl.col(column_name).cast(pl.Utf8))
    # Re-attach the sign and drop the temporary helper column.
    return df.with_columns(pl.concat_str("_sign", column_name).alias(column_name)).drop(
        "_sign"
    )
|
pyoframe/variables.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
1
|
+
"""
|
|
2
|
+
File containing Variable class representing decision variables in optimization models.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
from typing import Iterable
|
|
7
|
+
|
|
8
|
+
import polars as pl
|
|
9
|
+
|
|
10
|
+
from pyoframe.constraints import SupportsMath, Set
|
|
11
|
+
|
|
12
|
+
from pyoframe.constants import COEF_KEY, SOLUTION_KEY, VAR_KEY, VType, VTypeValue
|
|
13
|
+
from pyoframe.constraints import Expression
|
|
14
|
+
from pyoframe.model_element import ModelElement
|
|
15
|
+
from pyoframe.constraints import SetTypes
|
|
16
|
+
from pyoframe.util import IdCounterMixin, get_obj_repr
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class Variable(ModelElement, SupportsMath, IdCounterMixin):
    """
    Represents one or many decision variable in an optimization model.

    Parameters:
        *indexing_sets: SetTypes (typically a DataFrame or Set)
            If no indexing_sets are provided, a single variable with no dimensions is created.
            Otherwise, a variable is created for each element in the Cartesian product of the indexing_sets (see Set for details on behaviour).
        lb: float
            The lower bound for all variables.
        ub: float
            The upper bound for all variables.
        vtype: VType | VTypeValue
            The type of the variable. Can be either a VType enum or a string. Default is VType.CONTINUOUS.

    Examples:
        >>> import pandas as pd
        >>> from pyoframe import Variable
        >>> df = pd.DataFrame({"dim1": [1, 1, 2, 2, 3, 3], "dim2": ["a", "b", "a", "b", "a", "b"]})
        >>> Variable(df)
        <Variable lb=-inf ub=inf size=6 dimensions={'dim1': 3, 'dim2': 2}>
        [1,a]: x1
        [1,b]: x2
        [2,a]: x3
        [2,b]: x4
        [3,a]: x5
        [3,b]: x6
        >>> Variable(df[["dim1"]])
        Traceback (most recent call last):
        ...
        ValueError: Duplicate rows found in input data.
        >>> Variable(df[["dim1"]].drop_duplicates())
        <Variable lb=-inf ub=inf size=3 dimensions={'dim1': 3}>
        [1]: x7
        [2]: x8
        [3]: x9
    """

    # TODO: Breaking change, remove support for Iterable[AcceptableSets]
    def __init__(
        self,
        *indexing_sets: SetTypes | Iterable[SetTypes],
        lb: float = float("-inf"),
        ub: float = float("inf"),
        vtype: VType | VTypeValue = VType.CONTINUOUS,
    ):
        data = Set(*indexing_sets).data if len(indexing_sets) > 0 else pl.DataFrame()
        data = self._assign_ids(data)

        super().__init__(data)

        self.vtype: VType = VType(vtype)

        # Tightening the bounds is not strictly necessary, but it adds clarity
        if self.vtype == VType.BINARY:
            lb, ub = 0, 1

        self.lb = lb
        self.ub = ub

    @classmethod
    def get_id_column_name(cls):
        return VAR_KEY

    @property
    def solution(self):
        """
        Returns the solved values of this variable: a scalar for dimensionless
        variables, otherwise a DataFrame of dimensions plus the solution column.

        Raises ValueError if the model has not been solved yet.
        """
        if SOLUTION_KEY not in self.data.columns:
            # Bug fix: message previously read "No solution solution found".
            raise ValueError(f"No solution found for Variable '{self.name}'.")
        df = self.data.select(self.dimensions_unsafe + [SOLUTION_KEY])
        if df.shape == (1, 1):
            return df.item()
        return df

    @solution.setter
    def solution(self, value):
        """Attach solver results (a frame of VAR_KEY + SOLUTION_KEY) to this variable."""
        assert sorted(value.columns) == sorted([SOLUTION_KEY, VAR_KEY])
        df = self.data
        # Re-solving: discard any previously attached solution first.
        if SOLUTION_KEY in self.data.columns:
            df = df.drop(SOLUTION_KEY)
        self._data = df.join(value, on=VAR_KEY, how="left", validate="1:1")

    @property
    def ids(self):
        return self.data.select(self.dimensions_unsafe + [VAR_KEY])

    def __repr__(self):
        return (
            get_obj_repr(
                self, ("name", "lb", "ub"), size=self.data.height, dimensions=self.shape
            )
            + "\n"
            + self.to_expr().to_str(max_line_len=80, max_rows=10)
        )

    def to_expr(self) -> Expression:
        """Returns an Expression with a coefficient of 1 for each variable."""
        data = self.data
        # Robustness fix: before the model is solved there is no solution
        # column, so only drop it when present.
        if SOLUTION_KEY in data.columns:
            data = data.drop(SOLUTION_KEY)
        return self._new(data)

    def _new(self, data: pl.DataFrame):
        e = Expression(data.with_columns(pl.lit(1.0).alias(COEF_KEY)))
        e._model = self._model
        # We propagate the unmatched strategy intentionally. Without this a .keep_unmatched() on a variable would always be lost.
        e.unmatched_strategy = self.unmatched_strategy
        e.allowed_new_dims = self.allowed_new_dims
        return e

    def next(self, dim: str, wrap_around: bool = False) -> Expression:
        """
        Creates an expression where the variable at each index is the next variable in the specified dimension.

        Parameters:
            dim:
                The dimension over which to shift the variable.
            wrap_around:
                If True, the last index in the dimension is connected to the first index.

        Examples:
            >>> import pandas as pd
            >>> from pyoframe import Variable, Model
            >>> time_dim = pd.DataFrame({"time": ["00:00", "06:00", "12:00", "18:00"]})
            >>> space_dim = pd.DataFrame({"city": ["Toronto", "Berlin"]})
            >>> m = Model()
            >>> m.bat_charge = Variable(time_dim, space_dim)
            >>> m.bat_flow = Variable(time_dim, space_dim)
            >>> # Fails because the dimensions are not the same
            >>> m.bat_charge + m.bat_flow == m.bat_charge.next("time")
            Traceback (most recent call last):
            ...
            pyoframe._arithmetic.PyoframeError: Failed to add expressions:
            <Expression size=8 dimensions={'time': 4, 'city': 2} terms=16> + <Expression size=6 dimensions={'city': 2, 'time': 3} terms=6>
            Due to error:
            Dataframe has unmatched values. If this is intentional, use .drop_unmatched() or .keep_unmatched()
            shape: (2, 4)
            ┌───────┬─────────┬────────────┬────────────┐
            │ time  ┆ city    ┆ time_right ┆ city_right │
            │ ---   ┆ ---     ┆ ---        ┆ ---        │
            │ str   ┆ str     ┆ str        ┆ str        │
            ╞═══════╪═════════╪════════════╪════════════╡
            │ 18:00 ┆ Toronto ┆ null       ┆ null       │
            │ 18:00 ┆ Berlin  ┆ null       ┆ null       │
            └───────┴─────────┴────────────┴────────────┘

            >>> (m.bat_charge + m.bat_flow).drop_unmatched() == m.bat_charge.next("time")
            <Constraint sense='=' size=6 dimensions={'time': 3, 'city': 2} terms=18>
            [00:00,Berlin]: bat_charge[00:00,Berlin] + bat_flow[00:00,Berlin] - bat_charge[06:00,Berlin] = 0
            [00:00,Toronto]: bat_charge[00:00,Toronto] + bat_flow[00:00,Toronto] - bat_charge[06:00,Toronto] = 0
            [06:00,Berlin]: bat_charge[06:00,Berlin] + bat_flow[06:00,Berlin] - bat_charge[12:00,Berlin] = 0
            [06:00,Toronto]: bat_charge[06:00,Toronto] + bat_flow[06:00,Toronto] - bat_charge[12:00,Toronto] = 0
            [12:00,Berlin]: bat_charge[12:00,Berlin] + bat_flow[12:00,Berlin] - bat_charge[18:00,Berlin] = 0
            [12:00,Toronto]: bat_charge[12:00,Toronto] + bat_flow[12:00,Toronto] - bat_charge[18:00,Toronto] = 0

            >>> (m.bat_charge + m.bat_flow) == m.bat_charge.next("time", wrap_around=True)
            <Constraint sense='=' size=8 dimensions={'time': 4, 'city': 2} terms=24>
            [00:00,Berlin]: bat_charge[00:00,Berlin] + bat_flow[00:00,Berlin] - bat_charge[06:00,Berlin] = 0
            [00:00,Toronto]: bat_charge[00:00,Toronto] + bat_flow[00:00,Toronto] - bat_charge[06:00,Toronto] = 0
            [06:00,Berlin]: bat_charge[06:00,Berlin] + bat_flow[06:00,Berlin] - bat_charge[12:00,Berlin] = 0
            [06:00,Toronto]: bat_charge[06:00,Toronto] + bat_flow[06:00,Toronto] - bat_charge[12:00,Toronto] = 0
            [12:00,Berlin]: bat_charge[12:00,Berlin] + bat_flow[12:00,Berlin] - bat_charge[18:00,Berlin] = 0
            [12:00,Toronto]: bat_charge[12:00,Toronto] + bat_flow[12:00,Toronto] - bat_charge[18:00,Toronto] = 0
            [18:00,Berlin]: bat_charge[18:00,Berlin] + bat_flow[18:00,Berlin] - bat_charge[00:00,Berlin] = 0
            [18:00,Toronto]: bat_charge[18:00,Toronto] + bat_flow[18:00,Toronto] - bat_charge[00:00,Toronto] = 0
        """

        # Map each index to its successor along `dim`.
        wrapped = self.data.select(dim).unique(maintain_order=True).sort(by=dim)
        wrapped = wrapped.with_columns(pl.col(dim).shift(-1).alias("__next"))
        if wrap_around:
            wrapped = wrapped.with_columns(pl.col("__next").fill_null(pl.first(dim)))
        else:
            # Bug fix: previously this was drop_nulls(dim), which is a no-op
            # since `dim` never contains nulls; the last index (whose "__next"
            # is null) must be dropped so it has no successor.
            wrapped = wrapped.drop_nulls("__next")

        expr = self.to_expr()
        data = expr.data.rename({dim: "__prev"})
        data = data.join(
            wrapped, left_on="__prev", right_on="__next", how="inner"
        ).drop(["__prev", "__next"])
        return expr._new(data)
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright 2024 Bravos Power
|
|
4
|
+
Copyright 2021-2023 Fabian Hofmann
|
|
5
|
+
Copyright 2015-2021 PyPSA Developers
|
|
6
|
+
|
|
7
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
8
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
9
|
+
in the Software without restriction, including without limitation the rights
|
|
10
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
11
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
12
|
+
furnished to do so, subject to the following conditions:
|
|
13
|
+
|
|
14
|
+
The above copyright notice and this permission notice shall be included in all
|
|
15
|
+
copies or substantial portions of the Software.
|
|
16
|
+
|
|
17
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
18
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
19
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
20
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
21
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
22
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
23
|
+
SOFTWARE.
|