pyoframe 0.0.4__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyoframe/__init__.py +12 -3
- pyoframe/_arithmetic.py +2 -5
- pyoframe/constants.py +15 -12
- pyoframe/{constraints.py → core.py} +490 -74
- pyoframe/io.py +51 -25
- pyoframe/io_mappers.py +49 -18
- pyoframe/model.py +65 -42
- pyoframe/model_element.py +124 -18
- pyoframe/monkey_patch.py +2 -2
- pyoframe/objective.py +16 -13
- pyoframe/solvers.py +276 -109
- pyoframe/user_defined.py +60 -0
- pyoframe/util.py +56 -55
- {pyoframe-0.0.4.dist-info → pyoframe-0.0.5.dist-info}/METADATA +9 -2
- pyoframe-0.0.5.dist-info/RECORD +18 -0
- pyoframe/variables.py +0 -193
- pyoframe-0.0.4.dist-info/RECORD +0 -18
- {pyoframe-0.0.4.dist-info → pyoframe-0.0.5.dist-info}/LICENSE +0 -0
- {pyoframe-0.0.4.dist-info → pyoframe-0.0.5.dist-info}/WHEEL +0 -0
- {pyoframe-0.0.4.dist-info → pyoframe-0.0.5.dist-info}/top_level.txt +0 -0
@@ -8,6 +8,7 @@ from typing import (
     overload,
     Union,
     Optional,
+    TYPE_CHECKING,
 )
 from abc import ABC, abstractmethod

@@ -21,20 +22,36 @@ from pyoframe.constants import (
     CONSTRAINT_KEY,
     DUAL_KEY,
     RESERVED_COL_KEYS,
+    SLACK_COL,
     VAR_KEY,
+    SOLUTION_KEY,
+    RC_COL,
+    VType,
+    VTypeValue,
     Config,
     ConstraintSense,
     UnmatchedStrategy,
+    PyoframeError,
+    ObjSense,
 )
 from pyoframe.util import (
-    IdCounterMixin,
     cast_coef_to_string,
     concat_dimensions,
     get_obj_repr,
     parse_inputs_as_iterable,
+    unwrap_single_values,
+    dataframe_to_tupled_list,
+    FuncArgs,
 )

-from pyoframe.model_element import
+from pyoframe.model_element import (
+    ModelElement,
+    ModelElementWithId,
+    SupportPolarsMethodMixin,
+)
+
+if TYPE_CHECKING:  # pragma: no cover
+    from pyoframe.model import Model

 VAR_TYPE = pl.UInt32

@@ -54,9 +71,10 @@ class SupportsToExpr(Protocol):
 class SupportsMath(ABC, SupportsToExpr):
     """Any object that can be converted into an expression."""

-    def __init__(self):
+    def __init__(self, **kwargs):
         self.unmatched_strategy = UnmatchedStrategy.UNSET
         self.allowed_new_dims: List[str] = []
+        super().__init__(**kwargs)

     def keep_unmatched(self):
         self.unmatched_strategy = UnmatchedStrategy.KEEP
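For context: `Set`, `Expression`, and `Variable` now combine `SupportsMath` with other base classes such as `SupportPolarsMethodMixin`, so forwarding `**kwargs` through `super().__init__()` keeps the initializer chain intact. A minimal plain-Python sketch of the cooperative-initialization pattern (illustrative only, not pyoframe code):

    # Each base forwards leftover keyword arguments along the MRO; without the
    # forwarding, arguments meant for a later base class would be silently dropped.
    class Base:
        def __init__(self, df=None, **kwargs):
            self.df = df
            super().__init__(**kwargs)

    class MathMixin:
        def __init__(self, **kwargs):
            self.unmatched_strategy = "unset"
            super().__init__(**kwargs)

    class Combined(Base, MathMixin):
        pass

    c = Combined(df="data")
    print(c.df, c.unmatched_strategy)  # "data unset" -> both __init__ methods ran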
@@ -71,8 +89,7 @@ class SupportsMath(ABC, SupportsToExpr):
         return self

     @abstractmethod
-    def to_expr(self) -> "Expression":
-        raise NotImplementedError
+    def to_expr(self) -> "Expression": ...

     __add__ = _forward_to_expression("__add__")
     __mul__ = _forward_to_expression("__mul__")
@@ -145,16 +162,17 @@ SetTypes = Union[
     SupportsMath,
     Mapping[str, Sequence[object]],
     "Set",
+    "Constraint",
 ]


-class Set(ModelElement, SupportsMath):
+class Set(ModelElement, SupportsMath, SupportPolarsMethodMixin):
     def __init__(self, *data: SetTypes | Iterable[SetTypes], **named_data):
         data_list = list(data)
         for name, set in named_data.items():
             data_list.append({name: set})
         df = self._parse_acceptable_sets(*data_list)
-        if df.is_duplicated().any():
+        if not df.is_empty() and df.is_duplicated().any():
             raise ValueError("Duplicate rows found in input data.")
         super().__init__(df)

@@ -224,20 +242,34 @@ class Set(ModelElement, SupportsMath):

     def __add__(self, other):
         if isinstance(other, Set):
-
+            try:
+                return self._new(
+                    pl.concat([self.data, other.data]).unique(maintain_order=True)
+                )
+            except pl.ShapeError as e:
+                if "unable to vstack, column names don't match" in str(e):
+                    raise PyoframeError(
+                        f"Failed to add sets '{self.friendly_name}' and '{other.friendly_name}' because dimensions do not match ({self.dimensions} != {other.dimensions}) "
+                    ) from e
+                raise e
+
         return super().__add__(other)

     def __repr__(self):
         return (
             get_obj_repr(self, ("name",), size=self.data.height, dimensions=self.shape)
             + "\n"
-            +
+            + dataframe_to_tupled_list(
+                self.data, num_max_elements=Config.print_max_set_elements
+            )
         )

     @staticmethod
     def _set_to_polars(set: "SetTypes") -> pl.DataFrame:
         if isinstance(set, dict):
             df = pl.DataFrame(set)
+        elif isinstance(set, Constraint):
+            df = set.data.select(set.dimensions_unsafe)
         elif isinstance(set, SupportsMath):
             df = set.to_expr().data.drop(RESERVED_COL_KEYS).unique(maintain_order=True)
         elif isinstance(set, pd.Index):
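The new `except pl.ShapeError` branch exists because polars raises a `ShapeError` when vertically concatenating frames whose columns differ, which is exactly what happens when two sets with different dimensions are added. A standalone polars sketch of that failure mode (the exact error message depends on the installed polars version):

    import polars as pl

    days = pl.DataFrame({"day": ["mon", "tue"]})
    cities = pl.DataFrame({"city": ["Toronto", "Berlin"]})

    try:
        pl.concat([days, cities])  # mismatched columns -> ShapeError
    except pl.ShapeError as e:
        print(f"ShapeError: {e}")  # pyoframe re-raises this as a PyoframeError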
@@ -267,7 +299,7 @@ class Set(ModelElement, SupportsMath):
         return df


-class Expression(ModelElement, SupportsMath):
+class Expression(ModelElement, SupportsMath, SupportPolarsMethodMixin):
     """A linear expression."""

     def __init__(self, data: pl.DataFrame):
@@ -275,7 +307,7 @@ class Expression(ModelElement, SupportsMath):
         >>> import pandas as pd
         >>> from pyoframe import Variable, Model
         >>> df = pd.DataFrame({"item" : [1, 1, 1, 2, 2], "time": ["mon", "tue", "wed", "mon", "tue"], "cost": [1, 2, 3, 4, 5]}).set_index(["item", "time"])
-        >>> m = Model()
+        >>> m = Model("min")
         >>> m.Time = Variable(df.index)
         >>> m.Size = Variable(df.index)
         >>> expr = df["cost"] * m.Time + df["cost"] * m.Size
@@ -292,14 +324,31 @@ class Expression(ModelElement, SupportsMath):
         assert COEF_KEY in data.columns, "Missing coefficient column."

         # Sanity check no duplicates indices
-        if
-
-
-
-
+        if Config.enable_is_duplicated_expression_safety_check:
+            duplicated_mask = data.drop(COEF_KEY).is_duplicated()
+            # In theory this should never happen unless there's a bug in the library
+            if duplicated_mask.any():  # pragma: no cover
+                duplicated_data = data.filter(duplicated_mask)
+                raise ValueError(
+                    f"Cannot create an expression with duplicate indices:\n{duplicated_data}."
+                )

         super().__init__(data)

+    # Might add this in later
+    # @classmethod
+    # def empty(cls, dimensions=[], type=None):
+    #     data = {COEF_KEY: [], VAR_KEY: []}
+    #     data.update({d: [] for d in dimensions})
+    #     schema = {COEF_KEY: pl.Float64, VAR_KEY: pl.UInt32}
+    #     if type is not None:
+    #         schema.update({d: t for d, t in zip(dimensions, type)})
+    #     return Expression(
+    #         pl.DataFrame(data).with_columns(
+    #             *[pl.col(c).cast(t) for c, t in schema.items()]
+    #         )
+    #     )
+
     def sum(self, over: Union[str, Iterable[str]]):
         """
         Examples:
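The duplicate-index scan above only runs when `Config.enable_is_duplicated_expression_safety_check` is set. A short sketch of toggling it (assumes `Config` is imported from `pyoframe.constants`, as this module does; check the constants module for the actual default value):

    from pyoframe.constants import Config

    Config.enable_is_duplicated_expression_safety_check = True
    # From here on, every Expression constructed will scan its dataframe for
    # duplicate (dimension, variable) rows and raise ValueError if any are found.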
@@ -358,18 +407,18 @@ class Expression(ModelElement, SupportsMath):

         >>> import polars as pl
         >>> from pyoframe import Variable, Model
-        >>> pop_data = pl.DataFrame({"city": ["Toronto", "Vancouver", "Boston"], "population": [10, 2, 8]}).to_expr()
+        >>> pop_data = pl.DataFrame({"city": ["Toronto", "Vancouver", "Boston"], "year": [2024, 2024, 2024], "population": [10, 2, 8]}).to_expr()
         >>> cities_and_countries = pl.DataFrame({"city": ["Toronto", "Vancouver", "Boston"], "country": ["Canada", "Canada", "USA"]})
         >>> pop_data.map(cities_and_countries)
-        <Expression size=2 dimensions={'country': 2} terms=2>
-        [Canada]: 12
-        [USA]: 8
+        <Expression size=2 dimensions={'year': 1, 'country': 2} terms=2>
+        [2024,Canada]: 12
+        [2024,USA]: 8

         >>> pop_data.map(cities_and_countries, drop_shared_dims=False)
-        <Expression size=3 dimensions={'city': 3, 'country': 2} terms=3>
-        [Toronto,Canada]: 10
-        [Vancouver,Canada]: 2
-        [Boston,USA]: 8
+        <Expression size=3 dimensions={'city': 3, 'year': 1, 'country': 2} terms=3>
+        [Toronto,2024,Canada]: 10
+        [Vancouver,2024,Canada]: 2
+        [Boston,2024,USA]: 8
         """
         mapping_set = Set(mapping_set)

@@ -422,7 +471,7 @@ class Expression(ModelElement, SupportsMath):
         >>> import polars as pl
         >>> from pyoframe import Variable, Model
         >>> cost = pl.DataFrame({"item" : [1, 1, 1, 2, 2], "time": [1, 2, 3, 1, 2], "cost": [1, 2, 3, 4, 5]})
-        >>> m = Model()
+        >>> m = Model("min")
         >>> m.quantity = Variable(cost[["item", "time"]])
         >>> (m.quantity * cost).rolling_sum(over="time", window_size=2)
         <Expression size=5 dimensions={'item': 2, 'time': 3} terms=8>
@@ -501,7 +550,7 @@ class Expression(ModelElement, SupportsMath):
         >>> var + pd.DataFrame({"dim1": [1,2], "add": [10, 20]})
         Traceback (most recent call last):
         ...
-        pyoframe.
+        pyoframe.constants.PyoframeError: Failed to add expressions:
         <Expression size=3 dimensions={'dim1': 3} terms=3> + <Expression size=2 dimensions={'dim1': 2} terms=2>
         Due to error:
         Dataframe has unmatched values. If this is intentional, use .drop_unmatched() or .keep_unmatched()
@@ -630,11 +679,59 @@ class Expression(ModelElement, SupportsMath):
     def variable_terms(self):
         return self.data.filter(pl.col(VAR_KEY) != CONST_TERM)

+    @property
+    @unwrap_single_values
+    def value(self) -> pl.DataFrame:
+        """
+        The value of the expression. Only available after the model has been solved.
+
+        Examples:
+            >>> import pyoframe as pf
+            >>> m = pf.Model("max")
+            >>> m.X = pf.Variable({"dim1": [1, 2, 3]}, ub=10)
+            >>> m.expr_1 = 2 * m.X
+            >>> m.expr_2 = pf.sum(m.expr_1)
+            >>> m.objective = m.expr_2 + 3
+            >>> result = m.solve(log_to_console=False)
+            >>> m.expr_1.value
+            shape: (3, 2)
+            ┌──────┬──────────┐
+            │ dim1 ┆ solution │
+            │ ---  ┆ ---      │
+            │ i64  ┆ f64      │
+            ╞══════╪══════════╡
+            │ 1    ┆ 20.0     │
+            │ 2    ┆ 20.0     │
+            │ 3    ┆ 20.0     │
+            └──────┴──────────┘
+            >>> m.expr_2.value
+            60.0
+            >>> m.objective.value
+            63.0
+        """
+        if self._model.result is None or self._model.result.solution is None:
+            raise ValueError(
+                "Can't obtain value of expression since the model has not been solved."
+            )
+
+        df = (
+            self.data.join(self._model.result.solution.primal, on=VAR_KEY, how="left")
+            .drop(VAR_KEY)
+            .with_columns((pl.col(SOLUTION_KEY) * pl.col(COEF_KEY)))
+            .drop(COEF_KEY)
+        )
+
+        dims = self.dimensions
+        if dims is not None:
+            df = df.group_by(dims, maintain_order=True)
+        return df.sum()
+
     def to_str_table(
         self,
         max_line_len=None,
         max_rows=None,
         include_const_term=True,
+        include_const_variable=False,
         var_map=None,
         float_precision=None,
     ):
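For readers unfamiliar with the dataframe layout, the computation behind the new `value` property can be reproduced with plain polars: join each term's variable id against the primal solution, multiply coefficient by solution value, then sum per index. A toy sketch (the column names `__variable_id` and `__coeff` are stand-ins for `VAR_KEY`/`COEF_KEY`, not necessarily the package's real constants):

    import polars as pl

    terms = pl.DataFrame(
        {"dim1": [1, 1, 2], "__variable_id": [1, 2, 3], "__coeff": [2.0, 1.0, 4.0]}
    )
    primal = pl.DataFrame({"__variable_id": [1, 2, 3], "solution": [10.0, 10.0, 5.0]})

    value = (
        terms.join(primal, on="__variable_id", how="left")
        .with_columns((pl.col("solution") * pl.col("__coeff")).alias("solution"))
        .group_by("dim1", maintain_order=True)
        .agg(pl.col("solution").sum())
    )
    print(value)  # dim1=1 -> 30.0, dim1=2 -> 20.0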
@@ -650,12 +747,15 @@ class Expression(ModelElement, SupportsMath):
         data = data.with_columns(
             pl.concat_str(pl.lit("x"), VAR_KEY).alias("str_var")
         )
-
-
-
-        .
-
-
+        if include_const_variable:
+            data = data.drop(VAR_KEY).rename({"str_var": VAR_KEY})
+        else:
+            data = data.with_columns(
+                pl.when(pl.col(VAR_KEY) == CONST_TERM)
+                .then(pl.lit(""))
+                .otherwise("str_var")
+                .alias(VAR_KEY)
+            ).drop("str_var")

         dimensions = self.dimensions

@@ -709,6 +809,7 @@ class Expression(ModelElement, SupportsMath):
         max_line_len=None,
         max_rows=None,
         include_const_term=True,
+        include_const_variable=False,
         var_map=None,
         include_prefix=True,
         include_header=False,
@@ -727,6 +828,7 @@ class Expression(ModelElement, SupportsMath):
             max_line_len=max_line_len,
             max_rows=max_rows,
             include_const_term=include_const_term,
+            include_const_variable=include_const_variable,
             var_map=var_map,
             float_precision=float_precision,
         )
@@ -749,11 +851,11 @@ class Expression(ModelElement, SupportsMath):


 @overload
-def sum(over: Union[str, Sequence[str]], expr: SupportsToExpr): ...
+def sum(over: Union[str, Sequence[str]], expr: SupportsToExpr) -> "Expression": ...


 @overload
-def sum(over: SupportsToExpr): ...
+def sum(over: SupportsToExpr) -> "Expression": ...


 def sum(
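Both overloads of `sum` are exercised later in this diff's doctests; a minimal sketch of the two call forms they annotate (the model and data here are made up for illustration):

    import pandas as pd
    import pyoframe as pf

    m = pf.Model("min")
    m.hours = pf.Variable(pd.DataFrame({"project": ["A", "B", "C"]}))

    total = pf.sum(m.hours)                  # sum over every dimension
    over_project = pf.sum("project", m.hours)  # sum over the named dimension only
    # Both calls now advertise an Expression return type.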
@@ -786,10 +888,10 @@ def sum_by(by: Union[str, Sequence[str]], expr: SupportsToExpr) -> "Expression":
     return sum(over=remaining_dims, expr=expr)


-class Constraint(Expression, IdCounterMixin):
+class Constraint(ModelElementWithId):
     """A linear programming constraint."""

-    def __init__(self, lhs: Expression
+    def __init__(self, lhs: Expression, sense: ConstraintSense):
         """Initialize a constraint.

         Parameters:
@@ -798,53 +900,57 @@ class Constraint(Expression, IdCounterMixin):
         sense: Sense
             The sense of the constraint.
         """
-
-
-        else:
-            data = lhs
-        super().__init__(data)
-        if isinstance(lhs, Expression):
-            self._model = lhs._model
+        self.lhs = lhs
+        self._model = lhs._model
         self.sense = sense
+        self.to_relax: Optional[FuncArgs] = None

-        dims = self.dimensions
-
-
-        )
-
+        dims = self.lhs.dimensions
+        data = pl.DataFrame() if dims is None else self.lhs.data.select(dims).unique()
+
+        super().__init__(data)
+
+    def on_add_to_model(self, model: "Model", name: str):
+        super().on_add_to_model(model, name)
+        if self.to_relax is not None:
+            self.relax(*self.to_relax.args, **self.to_relax.kwargs)

     @property
+    @unwrap_single_values
+    def slack(self):
+        """
+        The slack of the constraint.
+        Will raise an error if the model has not already been solved.
+        The first call to this property will load the slack values from the solver (lazy loading).
+        """
+        if SLACK_COL not in self.data.columns:
+            if self._model.solver is None:
+                raise ValueError("The model has not been solved yet.")
+            self._model.solver.load_slack()
+        return self.data.select(self.dimensions_unsafe + [SLACK_COL])
+
+    @slack.setter
+    def slack(self, value):
+        self._extend_dataframe_by_id(value)
+
+    @property
+    @unwrap_single_values
     def dual(self) -> Union[pl.DataFrame, float]:
-        if DUAL_KEY not in self.
+        if DUAL_KEY not in self.data.columns:
             raise ValueError(f"No dual values founds for constraint '{self.name}'")
-
-        if result.shape == (1, 1):
-            return result.item()
-        return result
+        return self.data.select(self.dimensions_unsafe + [DUAL_KEY])

     @dual.setter
     def dual(self, value):
-
-        df = self.data_per_constraint
-        if DUAL_KEY in df.columns:
-            df = df.drop(DUAL_KEY)
-        self.data_per_constraint = df.join(
-            value, on=CONSTRAINT_KEY, how="left", validate="1:1"
-        )
+        self._extend_dataframe_by_id(value)

     @classmethod
     def get_id_column_name(cls):
         return CONSTRAINT_KEY

-    @property
-    def ids(self) -> pl.DataFrame:
-        return self.data_per_constraint.select(
-            self.dimensions_unsafe + [CONSTRAINT_KEY]
-        )
-
     def to_str_create_prefix(self, data, const_map=None):
         if const_map is None:
-            return
+            return self.lhs.to_str_create_prefix(data)

         data_map = const_map.apply(self.ids, to_col=None)

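A usage sketch of the new lazily loaded `slack` property alongside the reworked `dual` (hypothetical toy model; running it requires a supported solver, and dual availability depends on the solver and problem type):

    import pyoframe as pf

    m = pf.Model("max")
    m.x = pf.Variable(ub=10)
    m.limit = m.x <= 6
    m.objective = 1 * m.x
    m.solve(log_to_console=False)

    print(m.limit.slack)  # first access asks the solver to load slack values
    print(m.limit.dual)   # a float here, since the constraint has no dimensions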
@@ -860,6 +966,107 @@ class Constraint(Expression, IdCounterMixin):
             pl.concat_str(CONSTRAINT_KEY, pl.lit(": "), "expr").alias("expr")
         ).drop(CONSTRAINT_KEY)

+    def filter(self, *args, **kwargs) -> pl.DataFrame:
+        return self.lhs.data.filter(*args, **kwargs)
+
+    def relax(
+        self, cost: SupportsToExpr, max: Optional[SupportsToExpr] = None
+    ) -> Constraint:
+        """
+        Relaxes the constraint by adding a variable to the constraint that can be non-zero at a cost.
+
+        Parameters:
+            cost: SupportsToExpr
+                The cost of relaxing the constraint. Costs should be positives as they will automatically
+                become negative for maximization problems.
+            max: SupportsToExpr, default None
+                The maximum value of the relaxation variable.
+
+        Returns:
+            The same constraint
+
+        Examples:
+            >>> import pyoframe as pf
+            >>> m = pf.Model("max")
+            >>> homework_due_tomorrow = pl.DataFrame({"project": ["A", "B", "C"], "cost_per_hour_underdelivered": [10, 20, 30], "hours_to_finish": [9, 9, 9], "max_underdelivered": [1, 9, 9]})
+            >>> m.hours_spent = pf.Variable(homework_due_tomorrow[["project"]], lb=0)
+            >>> m.must_finish_project = m.hours_spent >= homework_due_tomorrow[["project", "hours_to_finish"]]
+            >>> m.only_one_day = sum("project", m.hours_spent) <= 24
+            >>> m.solve(log_to_console=False)
+            Status: warning
+            Termination condition: infeasible
+            <BLANKLINE>
+
+            >>> _ = m.must_finish_project.relax(homework_due_tomorrow[["project", "cost_per_hour_underdelivered"]], max=homework_due_tomorrow[["project", "max_underdelivered"]])
+            >>> result = m.solve(log_to_console=False)
+            >>> m.hours_spent.solution
+            shape: (3, 2)
+            ┌─────────┬──────────┐
+            │ project ┆ solution │
+            │ ---     ┆ ---      │
+            │ str     ┆ f64      │
+            ╞═════════╪══════════╡
+            │ A       ┆ 8.0      │
+            │ B       ┆ 7.0      │
+            │ C       ┆ 9.0      │
+            └─────────┴──────────┘
+
+
+            >>> # It can also be done all in one go!
+            >>> m = pf.Model("max")
+            >>> homework_due_tomorrow = pl.DataFrame({"project": ["A", "B", "C"], "cost_per_hour_underdelivered": [10, 20, 30], "hours_to_finish": [9, 9, 9], "max_underdelivered": [1, 9, 9]})
+            >>> m.hours_spent = pf.Variable(homework_due_tomorrow[["project"]], lb=0)
+            >>> m.must_finish_project = (m.hours_spent >= homework_due_tomorrow[["project", "hours_to_finish"]]).relax(5)
+            >>> m.only_one_day = (sum("project", m.hours_spent) <= 24).relax(1)
+            >>> _ = m.solve(log_to_console=False)
+            >>> m.objective.value
+            -3.0
+            >>> m.hours_spent.solution
+            shape: (3, 2)
+            ┌─────────┬──────────┐
+            │ project ┆ solution │
+            │ ---     ┆ ---      │
+            │ str     ┆ f64      │
+            ╞═════════╪══════════╡
+            │ A       ┆ 9.0      │
+            │ B       ┆ 9.0      │
+            │ C       ┆ 9.0      │
+            └─────────┴──────────┘
+        """
+        m = self._model
+        if m is None or self.name is None:
+            self.to_relax = FuncArgs(args=[cost, max])
+            return self
+
+        var_name = f"{self.name}_relaxation"
+        assert not hasattr(
+            m, var_name
+        ), "Conflicting names, relaxation variable already exists on the model."
+        var = Variable(self, lb=0, ub=max)
+
+        if self.sense == ConstraintSense.LE:
+            self.lhs -= var
+        elif self.sense == ConstraintSense.GE:
+            self.lhs += var
+        else:  # pragma: no cover
+            # TODO
+            raise NotImplementedError(
+                "Relaxation for equalities has not yet been implemented. Submit a pull request!"
+            )
+
+        setattr(m, var_name, var)
+        penalty = var * cost
+        if self.dimensions:
+            penalty = sum(self.dimensions, penalty)
+        if m.sense == ObjSense.MAX:
+            penalty *= -1
+        if m.objective is None:
+            m.objective = penalty
+        else:
+            m.objective += penalty
+
+        return self
+
     def to_str(
         self,
         max_line_len=None,
@@ -867,16 +1074,16 @@ class Constraint(Expression, IdCounterMixin):
         var_map=None,
         float_precision=None,
         const_map=None,
-    ):
+    ) -> str:
         dims = self.dimensions
-        str_table = self.to_str_table(
+        str_table = self.lhs.to_str_table(
             max_line_len=max_line_len,
             max_rows=max_rows,
             include_const_term=False,
             var_map=var_map,
         )
         str_table = self.to_str_create_prefix(str_table, const_map=const_map)
-        rhs = self.constant_terms.with_columns(pl.col(COEF_KEY) * -1)
+        rhs = self.lhs.constant_terms.with_columns(pl.col(COEF_KEY) * -1)
         rhs = cast_coef_to_string(rhs, drop_ones=False, float_precision=float_precision)
         # Remove leading +
         rhs = rhs.with_columns(pl.col(COEF_KEY).str.strip_chars(characters=" +"))
@@ -899,13 +1106,222 @@ class Constraint(Expression, IdCounterMixin):
             sense=f"'{self.sense.value}'",
             size=len(self),
             dimensions=self.shape,
-            terms=len(self.data),
+            terms=len(self.lhs.data),
         )
         + "\n"
         + self.to_str(max_line_len=80, max_rows=15)
     )

+
+class Variable(ModelElementWithId, SupportsMath, SupportPolarsMethodMixin):
+    """
+    Represents one or many decision variable in an optimization model.
+
+    Parameters:
+        *indexing_sets: SetTypes (typically a DataFrame or Set)
+            If no indexing_sets are provided, a single variable with no dimensions is created.
+            Otherwise, a variable is created for each element in the Cartesian product of the indexing_sets (see Set for details on behaviour).
+        lb: float
+            The lower bound for all variables.
+        ub: float
+            The upper bound for all variables.
+        vtype: VType | VTypeValue
+            The type of the variable. Can be either a VType enum or a string. Default is VType.CONTINUOUS.
+        equals: SupportsToExpr
+            When specified, a variable is created and a constraint is added to make the variable equal to the provided expression.
+
+    Examples:
+        >>> import pandas as pd
+        >>> from pyoframe import Variable
+        >>> df = pd.DataFrame({"dim1": [1, 1, 2, 2, 3, 3], "dim2": ["a", "b", "a", "b", "a", "b"]})
+        >>> Variable(df)
+        <Variable lb=-inf ub=inf size=6 dimensions={'dim1': 3, 'dim2': 2}>
+        [1,a]: x1
+        [1,b]: x2
+        [2,a]: x3
+        [2,b]: x4
+        [3,a]: x5
+        [3,b]: x6
+        >>> Variable(df[["dim1"]])
+        Traceback (most recent call last):
+        ...
+        ValueError: Duplicate rows found in input data.
+        >>> Variable(df[["dim1"]].drop_duplicates())
+        <Variable lb=-inf ub=inf size=3 dimensions={'dim1': 3}>
+        [1]: x7
+        [2]: x8
+        [3]: x9
+    """
+
+    # TODO: Breaking change, remove support for Iterable[AcceptableSets]
+    def __init__(
+        self,
+        *indexing_sets: SetTypes | Iterable[SetTypes],
+        lb: float | int | SupportsToExpr | None = None,
+        ub: float | int | SupportsToExpr | None = None,
+        vtype: VType | VTypeValue = VType.CONTINUOUS,
+        equals: SupportsToExpr = None,
+    ):
+        if lb is None:
+            lb = float("-inf")
+        if ub is None:
+            ub = float("inf")
+        if equals is not None:
+            assert (
+                len(indexing_sets) == 0
+            ), "Cannot specify both 'equals' and 'indexing_sets'"
+            indexing_sets = (equals,)
+
+        data = Set(*indexing_sets).data if len(indexing_sets) > 0 else pl.DataFrame()
+        super().__init__(data)
+
+        self.vtype: VType = VType(vtype)
+        self._equals = equals
+
+        # Tightening the bounds is not strictly necessary, but it adds clarity
+        if self.vtype == VType.BINARY:
+            lb, ub = 0, 1
+
+        if isinstance(lb, (float, int)):
+            self.lb, self.lb_constraint = lb, None
+        else:
+            self.lb, self.lb_constraint = float("-inf"), lb <= self
+
+        if isinstance(ub, (float, int)):
+            self.ub, self.ub_constraint = ub, None
+        else:
+            self.ub, self.ub_constraint = float("inf"), self <= ub
+
+    def on_add_to_model(self, model: "Model", name: str):
+        super().on_add_to_model(model, name)
+        if self.lb_constraint is not None:
+            setattr(model, f"{name}_lb", self.lb_constraint)
+        if self.ub_constraint is not None:
+            setattr(model, f"{name}_ub", self.ub_constraint)
+        if self._equals is not None:
+            setattr(model, f"{name}_equals", self == self._equals)
+
+    @classmethod
+    def get_id_column_name(cls):
+        return VAR_KEY
+
+    @property
+    @unwrap_single_values
+    def solution(self):
+        if SOLUTION_KEY not in self.data.columns:
+            raise ValueError(f"No solution solution found for Variable '{self.name}'.")
+
+        return self.data.select(self.dimensions_unsafe + [SOLUTION_KEY])
+
+    @property
+    @unwrap_single_values
+    def RC(self):
+        """
+        The reduced cost of the variable.
+        Will raise an error if the model has not already been solved.
+        The first call to this property will load the reduced costs from the solver (lazy loading).
+        """
+        if RC_COL not in self.data.columns:
+            if self._model.solver is None:
+                raise ValueError("The model has not been solved yet.")
+            self._model.solver.load_rc()
+        return self.data.select(self.dimensions_unsafe + [RC_COL])
+
+    @RC.setter
+    def RC(self, value):
+        self._extend_dataframe_by_id(value)
+
+    @solution.setter
+    def solution(self, value):
+        self._extend_dataframe_by_id(value)
+
+    def __repr__(self):
+        return (
+            get_obj_repr(
+                self, ("name", "lb", "ub"), size=self.data.height, dimensions=self.shape
+            )
+            + "\n"
+            + self.to_expr().to_str(max_line_len=80, max_rows=10)
+        )
+
+    def to_expr(self) -> Expression:
+        return self._new(self.data.drop(SOLUTION_KEY))
+
     def _new(self, data: pl.DataFrame):
-
-
-
+        e = Expression(data.with_columns(pl.lit(1.0).alias(COEF_KEY)))
+        e._model = self._model
+        # We propogate the unmatched strategy intentionally. Without this a .keep_unmatched() on a variable would always be lost.
+        e.unmatched_strategy = self.unmatched_strategy
+        e.allowed_new_dims = self.allowed_new_dims
+        return e
+
+    def next(self, dim: str, wrap_around: bool = False) -> Expression:
+        """
+        Creates an expression where the variable at each index is the next variable in the specified dimension.
+
+        Parameters:
+            dim:
+                The dimension over which to shift the variable.
+            wrap_around:
+                If True, the last index in the dimension is connected to the first index.
+
+        Examples:
+            >>> import pandas as pd
+            >>> from pyoframe import Variable, Model
+            >>> time_dim = pd.DataFrame({"time": ["00:00", "06:00", "12:00", "18:00"]})
+            >>> space_dim = pd.DataFrame({"city": ["Toronto", "Berlin"]})
+            >>> m = Model("min")
+            >>> m.bat_charge = Variable(time_dim, space_dim)
+            >>> m.bat_flow = Variable(time_dim, space_dim)
+            >>> # Fails because the dimensions are not the same
+            >>> m.bat_charge + m.bat_flow == m.bat_charge.next("time")
+            Traceback (most recent call last):
+            ...
+            pyoframe.constants.PyoframeError: Failed to add expressions:
+            <Expression size=8 dimensions={'time': 4, 'city': 2} terms=16> + <Expression size=6 dimensions={'city': 2, 'time': 3} terms=6>
+            Due to error:
+            Dataframe has unmatched values. If this is intentional, use .drop_unmatched() or .keep_unmatched()
+            shape: (2, 4)
+            ┌───────┬─────────┬────────────┬────────────┐
+            │ time  ┆ city    ┆ time_right ┆ city_right │
+            │ ---   ┆ ---     ┆ ---        ┆ ---        │
+            │ str   ┆ str     ┆ str        ┆ str        │
+            ╞═══════╪═════════╪════════════╪════════════╡
+            │ 18:00 ┆ Toronto ┆ null       ┆ null       │
+            │ 18:00 ┆ Berlin  ┆ null       ┆ null       │
+            └───────┴─────────┴────────────┴────────────┘
+
+            >>> (m.bat_charge + m.bat_flow).drop_unmatched() == m.bat_charge.next("time")
+            <Constraint sense='=' size=6 dimensions={'time': 3, 'city': 2} terms=18>
+            [00:00,Berlin]: bat_charge[00:00,Berlin] + bat_flow[00:00,Berlin] - bat_charge[06:00,Berlin] = 0
+            [00:00,Toronto]: bat_charge[00:00,Toronto] + bat_flow[00:00,Toronto] - bat_charge[06:00,Toronto] = 0
+            [06:00,Berlin]: bat_charge[06:00,Berlin] + bat_flow[06:00,Berlin] - bat_charge[12:00,Berlin] = 0
+            [06:00,Toronto]: bat_charge[06:00,Toronto] + bat_flow[06:00,Toronto] - bat_charge[12:00,Toronto] = 0
+            [12:00,Berlin]: bat_charge[12:00,Berlin] + bat_flow[12:00,Berlin] - bat_charge[18:00,Berlin] = 0
+            [12:00,Toronto]: bat_charge[12:00,Toronto] + bat_flow[12:00,Toronto] - bat_charge[18:00,Toronto] = 0
+
+            >>> (m.bat_charge + m.bat_flow) == m.bat_charge.next("time", wrap_around=True)
+            <Constraint sense='=' size=8 dimensions={'time': 4, 'city': 2} terms=24>
+            [00:00,Berlin]: bat_charge[00:00,Berlin] + bat_flow[00:00,Berlin] - bat_charge[06:00,Berlin] = 0
+            [00:00,Toronto]: bat_charge[00:00,Toronto] + bat_flow[00:00,Toronto] - bat_charge[06:00,Toronto] = 0
+            [06:00,Berlin]: bat_charge[06:00,Berlin] + bat_flow[06:00,Berlin] - bat_charge[12:00,Berlin] = 0
+            [06:00,Toronto]: bat_charge[06:00,Toronto] + bat_flow[06:00,Toronto] - bat_charge[12:00,Toronto] = 0
+            [12:00,Berlin]: bat_charge[12:00,Berlin] + bat_flow[12:00,Berlin] - bat_charge[18:00,Berlin] = 0
+            [12:00,Toronto]: bat_charge[12:00,Toronto] + bat_flow[12:00,Toronto] - bat_charge[18:00,Toronto] = 0
+            [18:00,Berlin]: bat_charge[18:00,Berlin] + bat_flow[18:00,Berlin] - bat_charge[00:00,Berlin] = 0
+            [18:00,Toronto]: bat_charge[18:00,Toronto] + bat_flow[18:00,Toronto] - bat_charge[00:00,Toronto] = 0
+        """
+
+        wrapped = self.data.select(dim).unique(maintain_order=True).sort(by=dim)
+        wrapped = wrapped.with_columns(pl.col(dim).shift(-1).alias("__next"))
+        if wrap_around:
+            wrapped = wrapped.with_columns(pl.col("__next").fill_null(pl.first(dim)))
+        else:
+            wrapped = wrapped.drop_nulls(dim)
+
+        expr = self.to_expr()
+        data = expr.data.rename({dim: "__prev"})
+        data = data.join(
+            wrapped, left_on="__prev", right_on="__next", how="inner"
+        ).drop(["__prev", "__next"])
+        return expr._new(data)