PySCIPOpt 6.0.0-cp314-cp314t-win_amd64.whl → 6.1.0-cp314-cp314t-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyscipopt/matrix.pxi CHANGED
@@ -1,112 +1,255 @@
- """
- # TODO Cythonize things. Improve performance.
- # TODO Add tests
- """
-
+ import operator
+ from typing import Literal, Optional, Tuple, Union
  import numpy as np
- from typing import Union
+ try:
+     # NumPy 2.x location
+     from numpy.lib.array_utils import normalize_axis_tuple
+ except ImportError:
+     # Fallback for NumPy 1.x
+     from numpy.core.numeric import normalize_axis_tuple

+ cimport numpy as cnp
+ from pyscipopt.scip cimport Expr, Solution

- def _is_number(e):
-     try:
-         f = float(e)
-         return True
-     except ValueError: # for malformed strings
-         return False
-     except TypeError: # for other types (Variable, Expr)
-         return False
+ cnp.import_array()


- def _matrixexpr_richcmp(self, other, op):
-     def _richcmp(self, other, op):
-         if op == 1: # <=
-             return self.__le__(other)
-         elif op == 5: # >=
-             return self.__ge__(other)
-         elif op == 2: # ==
-             return self.__eq__(other)
-         else:
-             raise NotImplementedError("Can only support constraints with '<=', '>=', or '=='.")
+ class MatrixExpr(np.ndarray):

-     if _is_number(other) or isinstance(other, Expr):
-         res = np.empty(self.shape, dtype=object)
-         res.flat = [_richcmp(i, other, op) for i in self.flat]
+     __array_priority__ = 100

-     elif isinstance(other, np.ndarray):
-         out = np.broadcast(self, other)
-         res = np.empty(out.shape, dtype=object)
-         res.flat = [_richcmp(i, j, op) for i, j in out]
+     def __array_ufunc__(
+         self,
+         ufunc: np.ufunc,
+         method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "at"],
+         *args,
+         **kwargs,
+     ):
+         """
+         Customizes the behavior of NumPy ufuncs for MatrixExpr.

-     else:
-         raise TypeError(f"Unsupported type {type(other)}")
+         Parameters
+         ----------
+         ufunc : numpy.ufunc
+             The ufunc object that was called.

-     return res.view(MatrixExprCons)
+         method : {"__call__", "reduce", "reduceat", "accumulate", "outer", "at"}
+             A string indicating which ufunc method was called.

+         *args : tuple
+             The input arguments to the ufunc.
+
+         **kwargs : dict
+             Additional keyword arguments to the ufunc.
+
+         Returns
+         -------
+         Expr, MatrixExpr
+             The result of the ufunc operation is wrapped back into a MatrixExpr if
+             applicable.

- class MatrixExpr(np.ndarray):
-     def sum(self, **kwargs):
-         """
-         Based on `numpy.ndarray.sum`, but returns a scalar if `axis=None`.
-         This is useful for matrix expressions to compare with a matrix or a scalar.
          """
+         res = NotImplemented
+         # Unboxing MatrixExpr to stop __array_ufunc__ recursion
+         args = tuple(_ensure_array(arg) for arg in args)
+         if method == "reduce":  # Handle reduction operations, e.g., np.sum(a)
+             if ufunc is np.add:
+                 res = _core_sum(args[0], **kwargs)
+
+         if method == "__call__":  # Standard ufunc call, e.g., np.add(a, b)
+             if ufunc in {np.matmul, np.dot}:
+                 res = _core_dot(args[0], args[1])
+             elif ufunc is np.less_equal:
+                 return _vec_le(args[0], args[1]).view(MatrixExprCons)
+             elif ufunc is np.greater_equal:
+                 return _vec_ge(args[0], args[1]).view(MatrixExprCons)
+             elif ufunc is np.equal:
+                 return _vec_eq(args[0], args[1]).view(MatrixExprCons)
+             elif ufunc in {np.less, np.greater, np.not_equal}:
+                 raise NotImplementedError("can only support '<=', '>=', or '=='")
+
+         if res is NotImplemented:
+             if "out" in kwargs:  # Unboxing MatrixExpr to stop __array_ufunc__ recursion
+                 kwargs["out"] = tuple(_ensure_array(arg, False) for arg in kwargs["out"])
+             res = super().__array_ufunc__(ufunc, method, *args, **kwargs)
+         return res.view(MatrixExpr) if isinstance(res, np.ndarray) else res
+
+     def _evaluate(self, Solution sol) -> NDArray[np.float64]:
+         return _vec_evaluate(self, sol).view(np.ndarray)

-         if kwargs.get("axis") is None:
-             # Speed up `.sum()` #1070
-             return quicksum(self.flat)
-         return super().sum(**kwargs)
-
-     def __le__(self, other: Union[float, int, "Expr", np.ndarray, "MatrixExpr"]) -> MatrixExprCons:
-         return _matrixexpr_richcmp(self, other, 1)
-
-     def __ge__(self, other: Union[float, int, "Expr", np.ndarray, "MatrixExpr"]) -> MatrixExprCons:
-         return _matrixexpr_richcmp(self, other, 5)
-
-     def __eq__(self, other: Union[float, int, "Expr", np.ndarray, "MatrixExpr"]) -> MatrixExprCons:
-         return _matrixexpr_richcmp(self, other, 2)
-
-     def __add__(self, other):
-         return super().__add__(other).view(MatrixExpr)
-
-     def __iadd__(self, other):
-         return super().__iadd__(other).view(MatrixExpr)
-
-     def __mul__(self, other):
-         return super().__mul__(other).view(MatrixExpr)
-
-     def __truediv__(self, other):
-         return super().__truediv__(other).view(MatrixExpr)
-
-     def __rtruediv__(self, other):
-         return super().__rtruediv__(other).view(MatrixExpr)
-
-     def __pow__(self, other):
-         return super().__pow__(other).view(MatrixExpr)
-
-     def __sub__(self, other):
-         return super().__sub__(other).view(MatrixExpr)
-
-     def __radd__(self, other):
-         return super().__radd__(other).view(MatrixExpr)
-
-     def __rmul__(self, other):
-         return super().__rmul__(other).view(MatrixExpr)
-
-     def __rsub__(self, other):
-         return super().__rsub__(other).view(MatrixExpr)
-
-     def __matmul__(self, other):
-         return super().__matmul__(other).view(MatrixExpr)

  class MatrixGenExpr(MatrixExpr):
      pass

- class MatrixExprCons(np.ndarray):

-     def __le__(self, other: Union[float, int, np.ndarray]) -> MatrixExprCons:
-         return _matrixexpr_richcmp(self, other, 1)
-
-     def __ge__(self, other: Union[float, int, np.ndarray]) -> MatrixExprCons:
-         return _matrixexpr_richcmp(self, other, 5)
+ class MatrixExprCons(np.ndarray):

-     def __eq__(self, other):
-         raise NotImplementedError("Cannot compare MatrixExprCons with '=='.")
+     __array_priority__ = 101
+
+     def __array_ufunc__(self, ufunc, method, *args, **kwargs):
+         if method == "__call__":
+             args = tuple(_ensure_array(arg) for arg in args)
+             if ufunc is np.less_equal:
+                 return _vec_le(args[0], args[1]).view(MatrixExprCons)
+             elif ufunc is np.greater_equal:
+                 return _vec_ge(args[0], args[1]).view(MatrixExprCons)
+         raise NotImplementedError("can only support '<=' or '>='")
+
+     def __eq__(self, _):
+         # TODO: Once numpy version >= 2.x, remove `__eq__`, as it will be handled by
+         # `__array_ufunc__`.
+         raise NotImplementedError("can only support '<=' or '>='")
+
+
+ _vec_le = np.frompyfunc(operator.le, 2, 1)
+ _vec_ge = np.frompyfunc(operator.ge, 2, 1)
+ _vec_eq = np.frompyfunc(operator.eq, 2, 1)
+ _vec_evaluate = np.frompyfunc(lambda expr, sol: expr._evaluate(sol), 2, 1)
+
+
+ cdef inline _ensure_array(arg, bint convert_scalar = True):
+     if isinstance(arg, np.ndarray):
+         return arg.view(np.ndarray)
+     elif isinstance(arg, (list, tuple)):
+         return np.asarray(arg)
+     return np.array(arg, dtype=object) if convert_scalar else arg
+
+
+ def _core_dot(cnp.ndarray a, cnp.ndarray b) -> Union[Expr, np.ndarray]:
+     """
+     Perform matrix multiplication between a N-Demension constant array and a N-Demension
+     `np.ndarray` of type `object` and containing `Expr` objects.
+
+     Parameters
+     ----------
+     a : np.ndarray
+         A constant n-d `np.ndarray` of type `np.float64`.
+
+     b : np.ndarray
+         A n-d `np.ndarray` of type `object` and containing `Expr` objects.
+
+     Returns
+     -------
+     Expr or np.ndarray
+         If both `a` and `b` are 1-D arrays, return an `Expr`, otherwise return a
+         `np.ndarray` of type `object` and containing `Expr` objects.
+     """
+     cdef bint a_is_1d = a.ndim == 1
+     cdef bint b_is_1d = b.ndim == 1
+     cdef cnp.ndarray a_nd = a[..., np.newaxis, :] if a_is_1d else a
+     cdef cnp.ndarray b_nd = b[..., :, np.newaxis] if b_is_1d else b
+     cdef bint a_is_num = a_nd.dtype.kind in "fiub"
+
+     if a_is_num ^ (b_nd.dtype.kind in "fiub"):
+         res = _core_dot_nd(a_nd, b_nd) if a_is_num else _core_dot_nd(b_nd.T, a_nd.T).T
+         if a_is_1d and b_is_1d:
+             return res.item()
+         if a_is_1d:
+             return res.reshape(np.delete(res.shape, -2))
+         if b_is_1d:
+             return res.reshape(np.delete(res.shape, -1))
+         return res
+     return NotImplemented
+
+
+ def _core_dot_2d(cnp.ndarray a, cnp.ndarray x) -> np.ndarray:
+     """
+     Perform matrix multiplication between a 2-Demension constant array and a 2-Demension
+     `np.ndarray` of type `object` and containing `Expr` objects.
+
+     Parameters
+     ----------
+     a : np.ndarray
+         A 2-D `np.ndarray` of type `np.float64`.
+
+     x : np.ndarray
+         A 2-D `np.ndarray` of type `object` and containing `Expr` objects.
+
+     Returns
+     -------
+     np.ndarray
+         A 2-D `np.ndarray` of type `object` and containing `Expr` objects.
+     """
+     if not a.flags.c_contiguous or a.dtype != np.float64:
+         a = np.ascontiguousarray(a, dtype=np.float64)
+
+     cdef const double[:, :] a_view = a
+     cdef int m = a.shape[0], k = x.shape[1]
+     cdef cnp.ndarray[object, ndim=2] res = np.zeros((m, k), dtype=object)
+     cdef Py_ssize_t[:] nonzero
+     cdef int i, j, idx
+
+     for i in range(m):
+         if (nonzero := np.flatnonzero(a_view[i, :])).size == 0:
+             continue
+
+         for j in range(k):
+             res[i, j] = quicksum(a_view[i, idx] * x[idx, j] for idx in nonzero)
+
+     return res
+
+
+ _core_dot_nd = np.vectorize(
+     _core_dot_2d,
+     otypes=[object],
+     signature="(m,n),(n,p)->(m,p)",
+ )
+
+
+ def _core_sum(
+     cnp.ndarray a,
+     axis: Optional[Union[int, Tuple[int, ...]]] = None,
+     keepdims: bool = False,
+     **kwargs,
+ ) -> Union[Expr, np.ndarray]:
+     """
+     Return the sum of the array elements over the given axis.
+
+     Parameters
+     ----------
+     a : np.ndarray
+         A `np.ndarray` of type `object` and containing `Expr` objects.
+
+     axis : None or int or tuple of ints, optional
+         Axis or axes along which a sum is performed. The default, axis=None, will
+         sum all of the elements of the input array. If axis is negative it counts
+         from the last to the first axis. If axis is a tuple of ints, a sum is
+         performed on all of the axes specified in the tuple instead of a single axis
+         or all the axes as before.
+
+     keepdims : bool, optional
+         If this is set to True, the axes which are reduced are left in the result as
+         dimensions with size one. With this option, the result will broadcast
+         correctly against the input array.
+
+     **kwargs : ignored
+         Additional keyword arguments are ignored. They exist for compatibility
+         with `numpy.ndarray.sum`.
+
+     Returns
+     -------
+     Expr or np.ndarray
+         If the sum is performed over all axes, return an Expr, otherwise return
+         a np.ndarray.
+
+     """
+     axis: Tuple[int, ...] = normalize_axis_tuple(
+         range(a.ndim) if axis is None else axis, a.ndim
+     )
+     if len(axis) == a.ndim:
+         res = quicksum(a.flat)
+         return (
+             np.array([res], dtype=object).reshape([1] * a.ndim)
+             if keepdims
+             else res
+         )
+
+     keep_axes = tuple(i for i in range(a.ndim) if i not in axis)
+     shape = (
+         tuple(1 if i in axis else a.shape[i] for i in range(a.ndim))
+         if keepdims
+         else tuple(a.shape[i] for i in keep_axes)
+     )
+     return np.apply_along_axis(
+         quicksum, -1, a.transpose(keep_axes + axis).reshape(shape + (-1,))
+     )
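The rewritten matrix.pxi routes all operator handling through `__array_ufunc__`: the comparisons `<=`, `>=`, `==` are vectorized with `np.frompyfunc`, the `np.add` "reduce" path (i.e. `sum`) goes to `_core_sum`, and `@`/`np.dot` goes to `_core_dot`. The minimal usage sketch below is not part of the diff; it assumes the matrix-variable API from earlier PySCIPOpt releases (`Model.addMatrixVar` / `addMatrixCons`), so the exact names and arguments should be checked against the installed version.

    import numpy as np
    from pyscipopt import Model

    model = Model("matrix-demo")
    A = np.array([[1.0, 2.0], [3.0, 4.0]])      # constant coefficient matrix
    b = np.array([10.0, 20.0])

    # assumed signature; check the PySCIPOpt docs for the exact addMatrixVar arguments
    x = model.addMatrixVar(shape=(2,), vtype="C", name="x", lb=0.0)

    # "A @ x" is dispatched through MatrixExpr.__array_ufunc__ to _core_dot, and
    # "<=" builds a MatrixExprCons elementwise via the np.less_equal ufunc.
    model.addMatrixCons(A @ x <= b)

    # x.sum() / np.sum(x) hits the np.add "reduce" branch and collapses to a
    # single Expr because every axis is reduced.
    model.setObjective(x.sum(), sense="maximize")
    model.optimize()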
pyscipopt/presol.pxi CHANGED
@@ -25,8 +25,7 @@ cdef class Presol:

      def presolexec(self, nrounds, presoltiming):
          '''executes presolver'''
-         print("python error in presolexec: this method needs to be implemented")
-         return {}
+         raise NotImplementedError("presolexec() is a fundamental callback and should be implemented in the derived class")

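With this change, a `Presol` subclass that does not override `presolexec` now fails loudly instead of printing an error and returning an empty dict. A hedged sketch of a minimal presolver follows; the result-dict convention (`{"result": SCIP_RESULT...}`) is the usual PySCIPOpt callback pattern but should be verified, and the `includePresol` registration call is only indicated in a comment because its full argument list is not shown in this diff.

    from pyscipopt import Model, Presol, SCIP_RESULT

    class NoOpPresol(Presol):
        def presolexec(self, nrounds, presoltiming):
            # Overriding presolexec is now mandatory: the 6.1.0 base class raises
            # NotImplementedError instead of printing an error and returning {}.
            return {"result": SCIP_RESULT.DIDNOTFIND}

    model = Model()
    # register with model.includePresol(NoOpPresol(), "noop", "does nothing", ...);
    # see the PySCIPOpt documentation for the full argument list.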
 
pyscipopt/propagator.pxi CHANGED
@@ -37,13 +37,11 @@ cdef class Prop:

      def propexec(self, proptiming):
          '''calls execution method of propagator'''
-         print("python error in propexec: this method needs to be implemented")
-         return {}
+         raise NotImplementedError("propexec() is a fundamental callback and should be implemented in the derived class")

      def propresprop(self, confvar, inferinfo, bdtype, relaxedbd):
          '''resolves the given conflicting bound, that was reduced by the given propagator'''
-         print("python error in propresprop: this method needs to be implemented")
-         return {}
+         raise NotImplementedError("propresprop() is a fundamental callback and should be implemented in the derived class")

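The same pattern now applies to propagators: `propexec` and `propresprop` raise `NotImplementedError` unless overridden. A minimal, hedged sketch of a `Prop` subclass is shown below; registration via `model.includeProp(...)` is only indicated in a comment because its full argument list is not part of this diff.

    from pyscipopt import Prop, SCIP_RESULT

    class NoOpProp(Prop):
        def propexec(self, proptiming):
            # propexec must now be implemented; returning a result dict is the
            # usual PySCIPOpt callback convention (assumed here).
            return {"result": SCIP_RESULT.DIDNOTRUN}

        def propresprop(self, confvar, inferinfo, bdtype, relaxedbd):
            # only relevant if the propagator tightens bounds that may later
            # need to be explained during conflict analysis
            return {"result": SCIP_RESULT.DIDNOTFIND}

    # register with model.includeProp(NoOpProp(), "noop", "does nothing", ...)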
 
@@ -24,7 +24,7 @@ def getLocalConss(model: Model, node = None) -> List[List[Constraint]]:
      else:
          cur_node = node

-     added_conss = []
+     added_conss: List[Constraint] = []
      while cur_node is not None:
          added_conss = cur_node.getAddedConss() + added_conss
          cur_node = cur_node.getParent()
@@ -15,31 +15,24 @@ def attach_primal_dual_evolution_eventhdlr(model: Model):

          def eventinit(self): # we want to collect best primal solutions and best dual solutions
              self.model.catchEvent(SCIP_EVENTTYPE.BESTSOLFOUND, self)
-             self.model.catchEvent(SCIP_EVENTTYPE.LPSOLVED, self)
-             self.model.catchEvent(SCIP_EVENTTYPE.NODESOLVED, self)
-
+             self.model.catchEvent(SCIP_EVENTTYPE.DUALBOUNDIMPROVED, self)

          def eventexec(self, event):
              # if a new best primal solution was found, we save when it was found and also its objective
              if event.getType() == SCIP_EVENTTYPE.BESTSOLFOUND:
                  self.model.data["primal_log"].append([self.model.getSolvingTime(), self.model.getPrimalbound()])

-             if not self.model.data["dual_log"]:
-                 self.model.data["dual_log"].append([self.model.getSolvingTime(), self.model.getDualbound()])
-
-             if self.model.getObjectiveSense() == "minimize":
-                 if self.model.isGT(self.model.getDualbound(), self.model.data["dual_log"][-1][1]):
-                     self.model.data["dual_log"].append([self.model.getSolvingTime(), self.model.getDualbound()])
-             else:
-                 if self.model.isLT(self.model.getDualbound(), self.model.data["dual_log"][-1][1]):
-                     self.model.data["dual_log"].append([self.model.getSolvingTime(), self.model.getDualbound()])
-
+             if event.getType() == SCIP_EVENTTYPE.DUALBOUNDIMPROVED:
+                 self.model.data["dual_log"].append([self.model.getSolvingTime(), self.model.getDualbound()])

      if not hasattr(model, "data") or model.data==None:
          model.data = {}

-     model.data["primal_log"] = []
-     model.data["dual_log"] = []
+     model.data.update({
+         'primal_log': [],
+         'dual_log': []
+     })
+
      hdlr = GapEventhdlr()
      model.includeEventhdlr(hdlr, "gapEventHandler", "Event handler which collects primal and dual solution evolution")
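The recipe now relies on the DUALBOUNDIMPROVED event instead of re-checking the dual bound after every LP/node event, and it initializes the logs with `dict.update` so other entries in `model.data` survive. A usage sketch follows; the import path and the instance file name are assumptions, while the `primal_log`/`dual_log` keys and the `[time, bound]` entry format come from the recipe itself.

    from pyscipopt import Model
    from pyscipopt.recipes.primal_dual_evolution import attach_primal_dual_evolution_eventhdlr

    model = Model()
    model.readProblem("instance.mps")            # hypothetical instance file
    attach_primal_dual_evolution_eventhdlr(model)
    model.optimize()

    # each entry is [solving_time_in_seconds, bound]
    for t, bound in model.data["dual_log"]:
        print(f"{t:8.2f}s  dual bound   = {bound}")
    for t, bound in model.data["primal_log"]:
        print(f"{t:8.2f}s  primal bound = {bound}")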
 
@@ -0,0 +1,37 @@
+ from pyscipopt import SCIP_EVENTTYPE, Eventhdlr, Model
+
+
+ def attach_structured_optimization_trace(model: Model):
+     """
+     Attaches an event handler that records optimization progress in structured JSONL format.
+
+     Args:
+         model: SCIP Model
+     """
+
+     class _TraceEventhdlr(Eventhdlr):
+         def eventinit(self):
+             self.model.catchEvent(SCIP_EVENTTYPE.BESTSOLFOUND, self)
+             self.model.catchEvent(SCIP_EVENTTYPE.DUALBOUNDIMPROVED, self)
+
+         def eventexec(self, event):
+             record = {
+                 "time": self.model.getSolvingTime(),
+                 "primalbound": self.model.getPrimalbound(),
+                 "dualbound": self.model.getDualbound(),
+                 "gap": self.model.getGap(),
+                 "nodes": self.model.getNNodes(),
+                 "nsol": self.model.getNSols(),
+             }
+             self.model.data["trace"].append(record)
+
+     if not hasattr(model, "data") or model.data is None:
+         model.data = {}
+     model.data["trace"] = []
+
+     hdlr = _TraceEventhdlr()
+     model.includeEventhdlr(
+         hdlr, "structured_trace", "Structured optimization trace handler"
+     )
+
+     return model
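This new recipe collects one record per BESTSOLFOUND/DUALBOUNDIMPROVED event in `model.data["trace"]`. The sketch below shows one way to write those records out as JSONL; the recipe's module path is not visible in this diff, so the import is an assumption, and the JSON dump is our own addition rather than part of the handler.

    import json

    from pyscipopt import Model
    # assumed module path for the new recipe shown above
    from pyscipopt.recipes.structured_trace import attach_structured_optimization_trace

    model = Model()
    model.readProblem("instance.mps")            # hypothetical instance file
    attach_structured_optimization_trace(model)
    model.optimize()

    with open("trace.jsonl", "w") as fh:
        for record in model.data["trace"]:       # keys: time, primalbound, dualbound, gap, nodes, nsol
            fh.write(json.dumps(record) + "\n")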
Binary file CHANGED (filename not shown)
pyscipopt/scip.pxd CHANGED
@@ -737,6 +737,7 @@ cdef extern from "scip/scip.h":
      SCIP_Real SCIPepsilon(SCIP* scip)
      SCIP_Real SCIPfeastol(SCIP* scip)
      SCIP_RETCODE SCIPsetObjIntegral(SCIP* scip)
+     SCIP_Bool SCIPisObjIntegral(SCIP* scip)
      SCIP_Real SCIPgetLocalOrigEstimate(SCIP* scip)
      SCIP_Real SCIPgetLocalTransEstimate(SCIP* scip)

@@ -870,6 +871,18 @@ cdef extern from "scip/scip.h":
      SCIP_Longint SCIPvarGetNBranchingsCurrentRun(SCIP_VAR* var, SCIP_BRANCHDIR dir)
      SCIP_Bool SCIPvarMayRoundUp(SCIP_VAR* var)
      SCIP_Bool SCIPvarMayRoundDown(SCIP_VAR* var)
+     SCIP_Bool SCIPvarIsActive(SCIP_VAR* var)
+     SCIP_Real SCIPadjustedVarLb(SCIP* scip, SCIP_VAR* var, SCIP_Real lb)
+     SCIP_Real SCIPadjustedVarUb(SCIP* scip, SCIP_VAR* var, SCIP_Real ub)
+     SCIP_RETCODE SCIPaggregateVars(SCIP* scip,
+                                    SCIP_VAR* varx,
+                                    SCIP_VAR* vary,
+                                    SCIP_Real scalarx,
+                                    SCIP_Real scalary,
+                                    SCIP_Real rhs,
+                                    SCIP_Bool* infeasible,
+                                    SCIP_Bool* redundant,
+                                    SCIP_Bool* aggregated)

      # LP Methods
      SCIP_RETCODE SCIPgetLPColsData(SCIP* scip, SCIP_COL*** cols, int* ncols)
@@ -1472,6 +1485,7 @@ cdef extern from "scip/scip.h":
      int SCIPgetPlungeDepth(SCIP* scip)
      SCIP_Longint SCIPgetNNodeLPIterations(SCIP* scip)
      SCIP_Longint SCIPgetNStrongbranchLPIterations(SCIP* scip)
+     SCIP_Real SCIPgetPrimalDualIntegral(SCIP* scip)

      # Parameter Functions
      SCIP_RETCODE SCIPsetBoolParam(SCIP* scip, char* name, SCIP_Bool value)
@@ -2110,6 +2124,8 @@ cdef extern from "tpi/tpi.h":
  cdef class Expr:
      cdef public terms

+     cpdef double _evaluate(self, Solution sol)
+
  cdef class Event:
      cdef SCIP_EVENT* event
      # can be used to store problem data
@@ -2222,12 +2238,17 @@ cdef class Model:
      cdef SCIP_Bool _freescip
      # map to store python variables
      cdef _modelvars
+     # map to store python constraints
+     cdef _modelconss
      # used to keep track of the number of event handlers generated
      cdef int _generated_event_handlers_count
      # store references to Benders subproblem Models for proper cleanup
      cdef _benders_subproblems
      # store iis, if found
      cdef SCIP_IIS* _iis
+     # helper methods for later var and cons cleanup
+     cdef _getOrCreateCons(self, SCIP_CONS* scip_cons)
+     cdef _getOrCreateVar(self, SCIP_VAR* scip_var)

      @staticmethod
      cdef create(SCIP* scip)
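These header additions make `SCIPisObjIntegral`, `SCIPgetPrimalDualIntegral`, the bound-adjustment helpers, and `SCIPaggregateVars` callable from Cython code that cimports `pyscipopt.scip`; whether 6.1.0 also adds Python-level `Model` wrappers for them is not visible in this pxd diff. Below is a hedged Cython-level sketch built only on the declared signature; the attribute names `Model._scip` and `Variable.scip_var` come from the existing pxd, and the numeric `SCIP_OKAY` check is a simplification of PySCIPOpt's usual retcode handling.

    # aggregate_sketch.pyx -- illustrative only, compiled against pyscipopt's pxd files
    from pyscipopt.scip cimport Model, Variable, SCIP_Bool, SCIP_RETCODE, SCIPaggregateVars

    def aggregate_equal(Model model, Variable x, Variable y):
        """Ask SCIP to aggregate x and y so that x - y == 0 (scalarx=1, scalary=-1, rhs=0)."""
        cdef SCIP_Bool infeasible = False
        cdef SCIP_Bool redundant = False
        cdef SCIP_Bool aggregated = False
        cdef SCIP_RETCODE rc = SCIPaggregateVars(model._scip, x.scip_var, y.scip_var,
                                                 1.0, -1.0, 0.0,
                                                 &infeasible, &redundant, &aggregated)
        if rc != 1:  # SCIP_OKAY == 1
            raise RuntimeError("SCIPaggregateVars returned retcode %d" % rc)
        return bool(infeasible), bool(redundant), bool(aggregated)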