gamspy 1.18.3-py3-none-any.whl → 1.19.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. gamspy/__init__.py +86 -98
  2. gamspy/__main__.py +6 -6
  3. gamspy/_algebra/__init__.py +13 -13
  4. gamspy/_algebra/condition.py +290 -194
  5. gamspy/_algebra/domain.py +103 -93
  6. gamspy/_algebra/expression.py +820 -799
  7. gamspy/_algebra/number.py +79 -70
  8. gamspy/_algebra/operable.py +185 -185
  9. gamspy/_algebra/operation.py +948 -845
  10. gamspy/_backend/backend.py +313 -311
  11. gamspy/_backend/engine.py +960 -960
  12. gamspy/_backend/local.py +124 -124
  13. gamspy/_backend/neos.py +567 -567
  14. gamspy/_cli/__init__.py +1 -1
  15. gamspy/_cli/cli.py +64 -64
  16. gamspy/_cli/gdx.py +377 -377
  17. gamspy/_cli/install.py +375 -372
  18. gamspy/_cli/list.py +94 -94
  19. gamspy/_cli/mps2gms.py +128 -128
  20. gamspy/_cli/probe.py +52 -52
  21. gamspy/_cli/retrieve.py +79 -79
  22. gamspy/_cli/run.py +158 -158
  23. gamspy/_cli/show.py +246 -255
  24. gamspy/_cli/uninstall.py +165 -165
  25. gamspy/_cli/util.py +94 -94
  26. gamspy/_communication.py +215 -215
  27. gamspy/_config.py +132 -132
  28. gamspy/_container.py +1694 -1452
  29. gamspy/_convert.py +720 -720
  30. gamspy/_database.py +271 -271
  31. gamspy/_extrinsic.py +181 -181
  32. gamspy/_miro.py +356 -352
  33. gamspy/_model.py +1803 -1615
  34. gamspy/_model_instance.py +701 -701
  35. gamspy/_options.py +780 -700
  36. gamspy/_serialization.py +156 -144
  37. gamspy/_symbols/__init__.py +17 -17
  38. gamspy/_symbols/alias.py +305 -299
  39. gamspy/_symbols/equation.py +1407 -1298
  40. gamspy/_symbols/implicits/__init__.py +11 -11
  41. gamspy/_symbols/implicits/implicit_equation.py +186 -186
  42. gamspy/_symbols/implicits/implicit_parameter.py +272 -272
  43. gamspy/_symbols/implicits/implicit_set.py +124 -124
  44. gamspy/_symbols/implicits/implicit_symbol.py +315 -315
  45. gamspy/_symbols/implicits/implicit_variable.py +255 -255
  46. gamspy/_symbols/parameter.py +648 -609
  47. gamspy/_symbols/set.py +985 -923
  48. gamspy/_symbols/symbol.py +395 -386
  49. gamspy/_symbols/universe_alias.py +182 -182
  50. gamspy/_symbols/variable.py +1101 -1017
  51. gamspy/_types.py +7 -7
  52. gamspy/_validation.py +735 -735
  53. gamspy/_workspace.py +72 -72
  54. gamspy/exceptions.py +128 -128
  55. gamspy/formulations/__init__.py +46 -46
  56. gamspy/formulations/ml/__init__.py +11 -11
  57. gamspy/formulations/ml/decision_tree_struct.py +80 -80
  58. gamspy/formulations/ml/gradient_boosting.py +203 -203
  59. gamspy/formulations/ml/random_forest.py +187 -187
  60. gamspy/formulations/ml/regression_tree.py +533 -533
  61. gamspy/formulations/nn/__init__.py +19 -19
  62. gamspy/formulations/nn/avgpool2d.py +232 -232
  63. gamspy/formulations/nn/conv1d.py +533 -533
  64. gamspy/formulations/nn/conv2d.py +529 -529
  65. gamspy/formulations/nn/linear.py +341 -341
  66. gamspy/formulations/nn/maxpool2d.py +88 -88
  67. gamspy/formulations/nn/minpool2d.py +88 -88
  68. gamspy/formulations/nn/mpool2d.py +245 -245
  69. gamspy/formulations/nn/torch_sequential.py +278 -278
  70. gamspy/formulations/piecewise.py +682 -682
  71. gamspy/formulations/result.py +119 -119
  72. gamspy/formulations/shape.py +188 -188
  73. gamspy/formulations/utils.py +173 -173
  74. gamspy/math/__init__.py +215 -215
  75. gamspy/math/activation.py +783 -767
  76. gamspy/math/log_power.py +435 -435
  77. gamspy/math/matrix.py +534 -534
  78. gamspy/math/misc.py +1709 -1625
  79. gamspy/math/probability.py +170 -170
  80. gamspy/math/trigonometric.py +232 -232
  81. gamspy/utils.py +810 -791
  82. gamspy/version.py +5 -5
  83. {gamspy-1.18.3.dist-info → gamspy-1.19.0.dist-info}/METADATA +90 -121
  84. gamspy-1.19.0.dist-info/RECORD +90 -0
  85. {gamspy-1.18.3.dist-info → gamspy-1.19.0.dist-info}/WHEEL +1 -1
  86. {gamspy-1.18.3.dist-info → gamspy-1.19.0.dist-info}/licenses/LICENSE +22 -22
  87. gamspy-1.18.3.dist-info/RECORD +0 -90
  88. {gamspy-1.18.3.dist-info → gamspy-1.19.0.dist-info}/entry_points.txt +0 -0
  89. {gamspy-1.18.3.dist-info → gamspy-1.19.0.dist-info}/top_level.txt +0 -0
gamspy/formulations/nn/linear.py
@@ -1,341 +1,341 @@
- from __future__ import annotations
-
- import math
- from typing import TYPE_CHECKING
-
- import numpy as np
-
- import gamspy as gp
- import gamspy.formulations.utils as utils
- from gamspy.exceptions import ValidationError
- from gamspy.math import dim
-
- if TYPE_CHECKING:
-     from gamspy import Parameter, Variable
-
-
- class Linear:
-     """
-     Formulation generator for Linear layer in GAMS.
-
-     Parameters
-     ----------
-     container : Container
-         Container that will contain the new variable and equations.
-     in_features : int
-         Input feature size
-     out_features : int
-         Output feature size
-     bias : bool = True
-         Should bias be added after linear transformation, by Default: True
-     name_prefix : str | None
-         Prefix for generated GAMSPy symbols, by default None which means
-         random prefix. Using the same name_prefix in different formulations causes name
-         conflicts. Do not use the same name_prefix again.
-
-     Examples
-     --------
-     >>> import gamspy as gp
-     >>> import numpy as np
-     >>> from gamspy.math import dim
-     >>> m = gp.Container()
-     >>> l1 = gp.formulations.Linear(m, 128, 64)
-     >>> w = np.random.rand(64, 128)
-     >>> b = np.random.rand(64)
-     >>> l1.load_weights(w, b)
-     >>> x = gp.Variable(m, "x", domain=dim([10, 128]))
-     >>> y, set_y = l1(x)
-     >>> [d.name for d in y.domain]
-     ['DenseDim10_1', 'DenseDim64_1']
-
-     """
-
-     def __init__(
-         self,
-         container: gp.Container,
-         in_features: int,
-         out_features: int,
-         bias: bool = True,
-         name_prefix: str | None = None,
-     ):
-         if not isinstance(in_features, int) or in_features <= 0:
-             raise ValidationError("in_features must be a positive integer")
-
-         if not isinstance(out_features, int) or out_features <= 0:
-             raise ValidationError("out_features must be a positive integer")
-
-         if not isinstance(bias, bool):
-             raise ValidationError("bias must be a boolean")
-
-         self.container = container
-         self.in_features = in_features
-         self.out_features = out_features
-         self.use_bias = bias
-         self._state = 0
-         self.weight: Parameter | Variable | None = None
-         self.weight_array = None
-         self.bias: Parameter | Variable | None = None
-         self.bias_array = None
-
-         if name_prefix is None:
-             name_prefix = gp.utils._get_unique_name()
-
-         self._name_prefix = name_prefix
-
-     def load_weights(self, weight: np.ndarray, bias: np.ndarray | None = None) -> None:
-         """
-         Mark Linear as parameter and load weights from NumPy arrays.
-         After this is called `make_variable` cannot be called. Use this
-         when you already have the weights of your Linear layer.
-
-         Parameters
-         ----------
-         weight : np.ndarray
-             Linear layer weights in shape (out_features x in_features)
-         bias : np.ndarray | None
-             Linear layer bias in shape (out_features, ), only required when
-             bias=True during initialization
-
-         """
-         if self._state == 2:
-             raise ValidationError(
-                 "load_weights cannot be used after calling make_variable"
-             )
-
-         if self.use_bias is False and bias is not None:
-             raise ValidationError(
-                 "bias must be None since bias was set to False during initialization"
-             )
-
-         if self.use_bias is True and bias is None:
-             raise ValidationError("bias must be provided")
-
-         if len(weight.shape) != 2:
-             raise ValidationError(
-                 f"expected 2D input for weight (got {len(weight.shape)}D input)"
-             )
-
-         expected_shape = (
-             self.out_features,
-             self.in_features,
-         )
-         if weight.shape != expected_shape:
-             raise ValidationError(f"weight expected to be in shape {expected_shape}")
-
-         if bias is not None:
-             if len(bias.shape) != 1:
-                 raise ValidationError(
-                     f"expected 1D input for bias (got {len(bias.shape)}D input)"
-                 )
-
-             if bias.shape[0] != self.out_features:
-                 raise ValidationError(
-                     f"bias expected to be in shape ({self.out_features},)"
-                 )
-
-         if self.weight is None:
-             self.weight = gp.Parameter(
-                 self.container,
-                 name=utils._generate_name("p", self._name_prefix, "weight"),
-                 domain=dim(expected_shape),
-                 records=weight,
-             )
-         else:
-             self.weight.setRecords(weight)
-         self.weight_array = weight
-
-         if self.use_bias:
-             if self.bias is None:
-                 self.bias = gp.Parameter(
-                     self.container,
-                     name=utils._generate_name("p", self._name_prefix, "bias"),
-                     domain=dim([self.out_features]),
-                     records=bias,
-                 )
-             else:
-                 self.bias.setRecords(bias)
-
-             self.bias_array = bias
-
-         self._state = 1
-
-     def make_variable(self, *, init_weights=False) -> None:
-         """
-         Mark Linear layer as variable. After this is called `load_weights`
-         cannot be called. Use this when you need to learn the weights
-         of your linear layer in your optimization model.
-
-
-         Parameters
-         ----------
-         init_weights : Optional[bool]
-             False by default.
-             Whether to initialize weights. It is suggested you set
-             this to True unless you want to initialize weights yourself.
-             When `init_weights` is set to True, values are initialized from
-             :math:`\\mathcal{U}(-\\sqrt{k},\\sqrt{k})`, where :math:`k = 1/in\\_features`.
-         """
-         if self._state == 1:
-             raise ValidationError(
-                 "make_variable cannot be used after calling load_weights"
-             )
-
-         expected_shape = (
-             self.out_features,
-             self.in_features,
-         )
-
-         sk = math.sqrt(1 / self.in_features)
-         if self.weight is None:
-             self.weight = gp.Variable(
-                 self.container,
-                 name=utils._generate_name("v", self._name_prefix, "weight"),
-                 domain=dim(expected_shape),
-             )
-             if init_weights:
-                 self.weight.l[...] = gp.math.uniform(-sk, sk)
-
-         if self.use_bias and self.bias is None:
-             self.bias = gp.Variable(
-                 self.container,
-                 name=utils._generate_name("v", self._name_prefix, "bias"),
-                 domain=dim([self.out_features]),
-             )
-             if init_weights:
-                 self.bias.l[...] = gp.math.uniform(-sk, sk)
-
-         self._state = 2
-
-     def __call__(
-         self, input: gp.Parameter | gp.Variable, propagate_bounds: bool = True
-     ) -> tuple[gp.Variable, list[gp.Equation]]:
-         """
-         Forward pass your input, generate output and equations required for
-         calculating the linear transformation. If `propagate_bounds` is True,
-         the `input` is of type variable, and `load_weights` was called, then
-         the bounds of the input are propagated to the output.
-
-         Parameters
-         ----------
-         input : gp.Parameter | gp.Variable
-             input to the linear layer, must be in shape
-             (* x in_features)
-         propagate_bounds : bool = True
-             If True, propagate bounds of the input to the output.
-             Otherwise, the output variable is unbounded.
-         """
-         if not isinstance(propagate_bounds, bool):
-             raise ValidationError("propagate_bounds should be a boolean.")
-
-         if self.weight is None:
-             raise ValidationError(
-                 "You must call load_weights or make_variable first before using the Linear"
-             )
-
-         if len(input.domain) == 0:
-             raise ValidationError("expected an input with at least 1 dimension")
-
-         if len(input.domain[-1]) != self.in_features:
-             raise ValidationError("in_features does not match")
-
-         expr = input @ self.weight.t()
-
-         if self.bias is not None:
-             expr = expr + self.bias[expr.domain[-1]]
-
-         out = gp.Variable(
-             self.container,
-             name=utils._generate_name("v", self._name_prefix, "output"),
-             domain=expr.domain,
-         )
-
-         set_out = gp.Equation(
-             self.container,
-             name=utils._generate_name("e", self._name_prefix, "set_output"),
-             domain=out.domain,
-         )
-
-         set_out[...] = out == expr
-
-         # If propagate_bounds is True, weight is a parameter and input is a variable,
-         # we will propagate the bounds of the input to the output
-         if propagate_bounds and self._state == 1 and isinstance(input, gp.Variable):
-             x_bounds = gp.Parameter(
-                 self.container,
-                 name=utils._generate_name("p", self._name_prefix, "input_bounds"),
-                 domain=dim([2, *input.shape]),
-             )
-             x_bounds[("0",) + tuple(input.domain)] = input.lo[...]
-             x_bounds[("1",) + tuple(input.domain)] = input.up[...]
-
-             # If the bounds are all zeros (None in GAMSPy parameters);
-             # we skip matrix multiplication as it will result in zero values
-             if x_bounds.records is None:
-                 out_bounds_array = np.zeros(out.shape)
-
-                 if self.use_bias:
-                     out_bounds_array = out_bounds_array + self.bias_array
-
-                 out_bounds = gp.Parameter(
-                     self.container,
-                     name=utils._generate_name("p", self._name_prefix, "output_bounds"),
-                     domain=dim(out.shape),
-                     records=out_bounds_array,
-                 )
-                 out.lo[...] = out_bounds
-                 out.up[...] = out_bounds
-
-                 return out, [set_out]
-
-             x_lb, x_ub = x_bounds.toDense()
-
-             # To deal with infinity values in the input bounds, we convert them into complex numbers
-             # where if the value is -inf, we convert it to 0 - 1j
-             # and if the value is inf, we convert it to 0 + 1j
-             x_lb = np.where(x_lb == -np.inf, 0 - 1j, x_lb)
-             x_ub = np.where(x_ub == np.inf, 0 + 1j, x_ub)
-
-             # get the positive and negative weights separately
-             w_pos = np.maximum(self.weight_array, 0)
-             w_neg = np.minimum(self.weight_array, 0)
-
-             lo_out = (x_lb @ w_pos.T) + (x_ub @ w_neg.T)
-             up_out = (x_ub @ w_pos.T) + (x_lb @ w_neg.T)
-
-             def _decode_complex_number(z: np.complex128) -> float:
-                 """
-                 Decode complex number to real number.
-                 5 + 0j -> 5
-                 3 + 1j -> inf
-                 7 - 3j -> -inf
-                 """
-                 # If imaginary part is zero, return real part
-                 if z.imag == 0:
-                     return z.real
-                 # If imaginary part is positive, return positive infinity
-                 elif z.imag > 0:
-                     return np.inf
-                 # If imaginary part is negative, return negative infinity
-                 else:
-                     return -np.inf
-
-             lo_out = np.vectorize(_decode_complex_number)(lo_out)
-             up_out = np.vectorize(_decode_complex_number)(up_out)
-
-             if self.use_bias:
-                 lo_out = lo_out + self.bias_array
-                 up_out = up_out + self.bias_array
-
-             out_bounds_array = np.stack([lo_out, up_out], axis=0)
-
-             out_bounds = gp.Parameter(
-                 self.container,
-                 name=utils._generate_name("p", self._name_prefix, "output_bounds"),
-                 domain=dim([2, *out.shape]),
-                 records=out_bounds_array,
-             )
-
-             out.lo[...] = out_bounds[("0",) + tuple(out.domain)]
-             out.up[...] = out_bounds[("1",) + tuple(out.domain)]
-
-         return out, [set_out]
+ from __future__ import annotations
+
+ import math
+ from typing import TYPE_CHECKING
+
+ import numpy as np
+
+ import gamspy as gp
+ import gamspy.formulations.utils as utils
+ from gamspy.exceptions import ValidationError
+ from gamspy.math import dim
+
+ if TYPE_CHECKING:
+     from gamspy import Parameter, Variable
+
+
+ class Linear:
+     """
+     Formulation generator for Linear layer in GAMS.
+
+     Parameters
+     ----------
+     container : Container
+         Container that will contain the new variable and equations.
+     in_features : int
+         Input feature size
+     out_features : int
+         Output feature size
+     bias : bool = True
+         Should bias be added after linear transformation, by Default: True
+     name_prefix : str | None
+         Prefix for generated GAMSPy symbols, by default None which means
+         random prefix. Using the same name_prefix in different formulations causes name
+         conflicts. Do not use the same name_prefix again.
+
+     Examples
+     --------
+     >>> import gamspy as gp
+     >>> import numpy as np
+     >>> from gamspy.math import dim
+     >>> m = gp.Container()
+     >>> l1 = gp.formulations.Linear(m, 128, 64)
+     >>> w = np.random.rand(64, 128)
+     >>> b = np.random.rand(64)
+     >>> l1.load_weights(w, b)
+     >>> x = gp.Variable(m, "x", domain=dim([10, 128]))
+     >>> y, set_y = l1(x)
+     >>> [d.name for d in y.domain]
+     ['DenseDim10_1', 'DenseDim64_1']
+
+     """
+
+     def __init__(
+         self,
+         container: gp.Container,
+         in_features: int,
+         out_features: int,
+         bias: bool = True,
+         name_prefix: str | None = None,
+     ):
+         if not isinstance(in_features, int) or in_features <= 0:
+             raise ValidationError("in_features must be a positive integer")
+
+         if not isinstance(out_features, int) or out_features <= 0:
+             raise ValidationError("out_features must be a positive integer")
+
+         if not isinstance(bias, bool):
+             raise ValidationError("bias must be a boolean")
+
+         self.container = container
+         self.in_features = in_features
+         self.out_features = out_features
+         self.use_bias = bias
+         self._state = 0
+         self.weight: Parameter | Variable | None = None
+         self.weight_array = None
+         self.bias: Parameter | Variable | None = None
+         self.bias_array = None
+
+         if name_prefix is None:
+             name_prefix = gp.utils._get_unique_name()
+
+         self._name_prefix = name_prefix
+
+     def load_weights(self, weight: np.ndarray, bias: np.ndarray | None = None) -> None:
+         """
+         Mark Linear as parameter and load weights from NumPy arrays.
+         After this is called `make_variable` cannot be called. Use this
+         when you already have the weights of your Linear layer.
+
+         Parameters
+         ----------
+         weight : np.ndarray
+             Linear layer weights in shape (out_features x in_features)
+         bias : np.ndarray | None
+             Linear layer bias in shape (out_features, ), only required when
+             bias=True during initialization
+
+         """
+         if self._state == 2:
+             raise ValidationError(
+                 "load_weights cannot be used after calling make_variable"
+             )
+
+         if self.use_bias is False and bias is not None:
+             raise ValidationError(
+                 "bias must be None since bias was set to False during initialization"
+             )
+
+         if self.use_bias is True and bias is None:
+             raise ValidationError("bias must be provided")
+
+         if len(weight.shape) != 2:
+             raise ValidationError(
+                 f"expected 2D input for weight (got {len(weight.shape)}D input)"
+             )
+
+         expected_shape = (
+             self.out_features,
+             self.in_features,
+         )
+         if weight.shape != expected_shape:
+             raise ValidationError(f"weight expected to be in shape {expected_shape}")
+
+         if bias is not None:
+             if len(bias.shape) != 1:
+                 raise ValidationError(
+                     f"expected 1D input for bias (got {len(bias.shape)}D input)"
+                 )
+
+             if bias.shape[0] != self.out_features:
+                 raise ValidationError(
+                     f"bias expected to be in shape ({self.out_features},)"
+                 )
+
+         if self.weight is None:
+             self.weight = gp.Parameter(
+                 self.container,
+                 name=utils._generate_name("p", self._name_prefix, "weight"),
+                 domain=dim(expected_shape),
+                 records=weight,
+             )
+         else:
+             self.weight.setRecords(weight)
+         self.weight_array = weight
+
+         if self.use_bias:
+             if self.bias is None:
+                 self.bias = gp.Parameter(
+                     self.container,
+                     name=utils._generate_name("p", self._name_prefix, "bias"),
+                     domain=dim([self.out_features]),
+                     records=bias,
+                 )
+             else:
+                 self.bias.setRecords(bias)
+
+             self.bias_array = bias
+
+         self._state = 1
+
+     def make_variable(self, *, init_weights=False) -> None:
+         """
+         Mark Linear layer as variable. After this is called `load_weights`
+         cannot be called. Use this when you need to learn the weights
+         of your linear layer in your optimization model.
+
+
+         Parameters
+         ----------
+         init_weights : Optional[bool]
+             False by default.
+             Whether to initialize weights. It is suggested you set
+             this to True unless you want to initialize weights yourself.
+             When `init_weights` is set to True, values are initialized from
+             :math:`\\mathcal{U}(-\\sqrt{k},\\sqrt{k})`, where :math:`k = 1/in\\_features`.
+         """
+         if self._state == 1:
+             raise ValidationError(
+                 "make_variable cannot be used after calling load_weights"
+             )
+
+         expected_shape = (
+             self.out_features,
+             self.in_features,
+         )
+
+         sk = math.sqrt(1 / self.in_features)
+         if self.weight is None:
+             self.weight = gp.Variable(
+                 self.container,
+                 name=utils._generate_name("v", self._name_prefix, "weight"),
+                 domain=dim(expected_shape),
+             )
+             if init_weights:
+                 self.weight.l[...] = gp.math.uniform(-sk, sk)
+
+         if self.use_bias and self.bias is None:
+             self.bias = gp.Variable(
+                 self.container,
+                 name=utils._generate_name("v", self._name_prefix, "bias"),
+                 domain=dim([self.out_features]),
+             )
+             if init_weights:
+                 self.bias.l[...] = gp.math.uniform(-sk, sk)
+
+         self._state = 2
+
+     def __call__(
+         self, input: gp.Parameter | gp.Variable, propagate_bounds: bool = True
+     ) -> tuple[gp.Variable, list[gp.Equation]]:
+         """
+         Forward pass your input, generate output and equations required for
+         calculating the linear transformation. If `propagate_bounds` is True,
+         the `input` is of type variable, and `load_weights` was called, then
+         the bounds of the input are propagated to the output.
+
+         Parameters
+         ----------
+         input : gp.Parameter | gp.Variable
+             input to the linear layer, must be in shape
+             (* x in_features)
+         propagate_bounds : bool = True
+             If True, propagate bounds of the input to the output.
+             Otherwise, the output variable is unbounded.
+         """
+         if not isinstance(propagate_bounds, bool):
+             raise ValidationError("propagate_bounds should be a boolean.")
+
+         if self.weight is None:
+             raise ValidationError(
+                 "You must call load_weights or make_variable first before using the Linear"
+             )
+
+         if len(input.domain) == 0:
+             raise ValidationError("expected an input with at least 1 dimension")
+
+         if len(input.domain[-1]) != self.in_features:
+             raise ValidationError("in_features does not match")
+
+         expr = input @ self.weight.t()
+
+         if self.bias is not None:
+             expr = expr + self.bias[expr.domain[-1]]
+
+         out = gp.Variable(
+             self.container,
+             name=utils._generate_name("v", self._name_prefix, "output"),
+             domain=expr.domain,
+         )
+
+         set_out = gp.Equation(
+             self.container,
+             name=utils._generate_name("e", self._name_prefix, "set_output"),
+             domain=out.domain,
+         )
+
+         set_out[...] = out == expr
+
+         # If propagate_bounds is True, weight is a parameter and input is a variable,
+         # we will propagate the bounds of the input to the output
+         if propagate_bounds and self._state == 1 and isinstance(input, gp.Variable):
+             x_bounds = gp.Parameter(
+                 self.container,
+                 name=utils._generate_name("p", self._name_prefix, "input_bounds"),
+                 domain=dim([2, *input.shape]),
+             )
+             x_bounds[("0",) + tuple(input.domain)] = input.lo[...]
+             x_bounds[("1",) + tuple(input.domain)] = input.up[...]
+
+             # If the bounds are all zeros (None in GAMSPy parameters);
+             # we skip matrix multiplication as it will result in zero values
+             if x_bounds.records is None:
+                 out_bounds_array = np.zeros(out.shape)
+
+                 if self.use_bias:
+                     out_bounds_array = out_bounds_array + self.bias_array
+
+                 out_bounds = gp.Parameter(
+                     self.container,
+                     name=utils._generate_name("p", self._name_prefix, "output_bounds"),
+                     domain=dim(out.shape),
+                     records=out_bounds_array,
+                 )
+                 out.lo[...] = out_bounds
+                 out.up[...] = out_bounds
+
+                 return out, [set_out]
+
+             x_lb, x_ub = x_bounds.toDense()
+
+             # To deal with infinity values in the input bounds, we convert them into complex numbers
+             # where if the value is -inf, we convert it to 0 - 1j
+             # and if the value is inf, we convert it to 0 + 1j
+             x_lb = np.where(x_lb == -np.inf, 0 - 1j, x_lb)
+             x_ub = np.where(x_ub == np.inf, 0 + 1j, x_ub)
+
+             # get the positive and negative weights separately
+             w_pos = np.maximum(self.weight_array, 0)
+             w_neg = np.minimum(self.weight_array, 0)
+
+             lo_out = (x_lb @ w_pos.T) + (x_ub @ w_neg.T)
+             up_out = (x_ub @ w_pos.T) + (x_lb @ w_neg.T)
+
+             def _decode_complex_number(z: np.complex128) -> float:
+                 """
+                 Decode complex number to real number.
+                 5 + 0j -> 5
+                 3 + 1j -> inf
+                 7 - 3j -> -inf
+                 """
+                 # If imaginary part is zero, return real part
+                 if z.imag == 0:
+                     return z.real
+                 # If imaginary part is positive, return positive infinity
+                 elif z.imag > 0:
+                     return np.inf
+                 # If imaginary part is negative, return negative infinity
+                 else:
+                     return -np.inf
+
+             lo_out = np.vectorize(_decode_complex_number)(lo_out)
+             up_out = np.vectorize(_decode_complex_number)(up_out)
+
+             if self.use_bias:
+                 lo_out = lo_out + self.bias_array
+                 up_out = up_out + self.bias_array
+
+             out_bounds_array = np.stack([lo_out, up_out], axis=0)
+
+             out_bounds = gp.Parameter(
+                 self.container,
+                 name=utils._generate_name("p", self._name_prefix, "output_bounds"),
+                 domain=dim([2, *out.shape]),
+                 records=out_bounds_array,
+             )
+
+             out.lo[...] = out_bounds[("0",) + tuple(out.domain)]
+             out.up[...] = out_bounds[("1",) + tuple(out.domain)]
+
+         return out, [set_out]
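
The bound propagation in __call__ above is interval arithmetic over the affine map y = x @ W.T + b: a positive weight carries the input's lower bound into the output's lower bound, a negative weight swaps the roles of the two input bounds, and infinite bounds are encoded as imaginary units so they survive the matrix multiplications. Below is a minimal standalone sketch of the same arithmetic, using only NumPy; the helper name propagate_linear_bounds is illustrative, not part of gamspy.

import numpy as np

def propagate_linear_bounds(x_lb, x_ub, weight, bias=None):
    # Interval arithmetic for y = x @ weight.T + bias, mirroring the logic
    # in gamspy/formulations/nn/linear.py shown in the diff above.
    # Encode infinite bounds as imaginary units so they survive matmul.
    x_lb = np.where(x_lb == -np.inf, 0 - 1j, x_lb.astype(complex))
    x_ub = np.where(x_ub == np.inf, 0 + 1j, x_ub.astype(complex))

    # Positive weights keep the bound roles; negative weights swap them.
    w_pos = np.maximum(weight, 0)
    w_neg = np.minimum(weight, 0)
    lo = x_lb @ w_pos.T + x_ub @ w_neg.T
    up = x_ub @ w_pos.T + x_lb @ w_neg.T

    # Decode: any surviving imaginary part marks an infinite bound.
    decode = np.vectorize(
        lambda z: z.real if z.imag == 0 else (np.inf if z.imag > 0 else -np.inf)
    )
    lo, up = decode(lo), decode(up)
    if bias is not None:
        lo, up = lo + bias, up + bias
    return lo, up

# One input is unbounded below; through the positive weight it drags the
# output's lower bound to -inf, while the upper bound stays finite.
w = np.array([[1.0, -2.0]])      # shape (out_features, in_features)
lb = np.array([[-np.inf, 0.0]])  # input lower bounds
ub = np.array([[1.0, 3.0]])      # input upper bounds
print(propagate_linear_bounds(lb, ub, w))  # (array([[-inf]]), array([[1.]]))

The imaginary-unit encoding sidesteps the IEEE rule that 0 * inf is NaN: a zero weight multiplied into an encoded infinity contributes 0j, which decodes back to zero, while any nonzero imaginary residue decodes to the matching infinity.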