enncode 0.1.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. enncode-0.1.5/LICENSE.txt +21 -0
  2. enncode-0.1.5/PKG-INFO +371 -0
  3. enncode-0.1.5/README.md +345 -0
  4. enncode-0.1.5/enncode/__init__.py +0 -0
  5. enncode-0.1.5/enncode/compatibility.py +460 -0
  6. enncode-0.1.5/enncode/gurobiModelBuilder.py +141 -0
  7. enncode-0.1.5/enncode/internalOnnx.py +17 -0
  8. enncode-0.1.5/enncode/networkBuilder.py +241 -0
  9. enncode-0.1.5/enncode/operators/__init__.py +0 -0
  10. enncode-0.1.5/enncode/operators/add.py +107 -0
  11. enncode-0.1.5/enncode/operators/averagepool.py +134 -0
  12. enncode-0.1.5/enncode/operators/base_operator.py +36 -0
  13. enncode-0.1.5/enncode/operators/batch_normalization.py +103 -0
  14. enncode-0.1.5/enncode/operators/concat.py +83 -0
  15. enncode-0.1.5/enncode/operators/conv.py +124 -0
  16. enncode-0.1.5/enncode/operators/div.py +104 -0
  17. enncode-0.1.5/enncode/operators/dropout.py +75 -0
  18. enncode-0.1.5/enncode/operators/flatten.py +87 -0
  19. enncode-0.1.5/enncode/operators/gemm.py +121 -0
  20. enncode-0.1.5/enncode/operators/identity.py +54 -0
  21. enncode-0.1.5/enncode/operators/matmul.py +122 -0
  22. enncode-0.1.5/enncode/operators/maxpool.py +122 -0
  23. enncode-0.1.5/enncode/operators/mul.py +109 -0
  24. enncode-0.1.5/enncode/operators/operator_factory.py +74 -0
  25. enncode-0.1.5/enncode/operators/relu.py +105 -0
  26. enncode-0.1.5/enncode/operators/reshape.py +90 -0
  27. enncode-0.1.5/enncode/operators/sub.py +105 -0
  28. enncode-0.1.5/enncode/operators/unsqueeze.py +90 -0
  29. enncode-0.1.5/enncode/parser.py +105 -0
  30. enncode-0.1.5/enncode/parsers/__init__.py +0 -0
  31. enncode-0.1.5/enncode/parsers/add_parser.py +54 -0
  32. enncode-0.1.5/enncode/parsers/averagepool_parser.py +99 -0
  33. enncode-0.1.5/enncode/parsers/base_parser.py +23 -0
  34. enncode-0.1.5/enncode/parsers/batch_normalization_parser.py +66 -0
  35. enncode-0.1.5/enncode/parsers/concat_parser.py +92 -0
  36. enncode-0.1.5/enncode/parsers/constant_parser.py +77 -0
  37. enncode-0.1.5/enncode/parsers/conv_parser.py +98 -0
  38. enncode-0.1.5/enncode/parsers/div_parser.py +82 -0
  39. enncode-0.1.5/enncode/parsers/dropout_parser.py +69 -0
  40. enncode-0.1.5/enncode/parsers/flatten_parser.py +80 -0
  41. enncode-0.1.5/enncode/parsers/gemm_parser.py +62 -0
  42. enncode-0.1.5/enncode/parsers/identity_parser.py +65 -0
  43. enncode-0.1.5/enncode/parsers/matmul_parser.py +85 -0
  44. enncode-0.1.5/enncode/parsers/maxpool_parser.py +99 -0
  45. enncode-0.1.5/enncode/parsers/mul_parser.py +80 -0
  46. enncode-0.1.5/enncode/parsers/parser_factory.py +69 -0
  47. enncode-0.1.5/enncode/parsers/relu_parser.py +53 -0
  48. enncode-0.1.5/enncode/parsers/reshape_parser.py +75 -0
  49. enncode-0.1.5/enncode/parsers/shape_parser.py +62 -0
  50. enncode-0.1.5/enncode/parsers/sub_parser.py +64 -0
  51. enncode-0.1.5/enncode/parsers/unsqueeze_parser.py +62 -0
  52. enncode-0.1.5/enncode/utils.py +118 -0
  53. enncode-0.1.5/enncode.egg-info/PKG-INFO +371 -0
  54. enncode-0.1.5/enncode.egg-info/SOURCES.txt +59 -0
  55. enncode-0.1.5/enncode.egg-info/dependency_links.txt +1 -0
  56. enncode-0.1.5/enncode.egg-info/requires.txt +10 -0
  57. enncode-0.1.5/enncode.egg-info/top_level.txt +1 -0
  58. enncode-0.1.5/pyproject.toml +40 -0
  59. enncode-0.1.5/setup.cfg +4 -0
  60. enncode-0.1.5/tests/test_integration.py +191 -0
  61. enncode-0.1.5/tests/tests_utils.py +250 -0
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Muhammad Atallah
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
enncode-0.1.5/PKG-INFO ADDED
@@ -0,0 +1,371 @@
1
+ Metadata-Version: 2.4
2
+ Name: enncode
3
+ Version: 0.1.5
4
+ Summary: The enncode is a Python library that creates Gurobi models for neural networks in ONNX format.
5
+ Author-email: Muhammad Atallah <muhammad.sabri@outlook.com>, Lukas Dankwart <lukas.dankwart@tu-dortmund.de>, Mustafa Yalciner <mustafa.yalciner@tu-dortmund.de>
6
+ License-Expression: MIT
7
+ Project-URL: Repository, https://github.com/Muhammad-Atallah/onnxgurobi
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: Programming Language :: Python :: 3.11
10
+ Classifier: Programming Language :: Python :: 3.12
11
+ Classifier: Operating System :: OS Independent
12
+ Requires-Python: >=3.11
13
+ Description-Content-Type: text/markdown
14
+ License-File: LICENSE.txt
15
+ Requires-Dist: numpy>=1.26.4
16
+ Requires-Dist: onnx>=1.16.2
17
+ Requires-Dist: onnxruntime>=1.20.1
18
+ Requires-Dist: onnxscript>=0.5.6
19
+ Requires-Dist: torch>=2.5.1
20
+ Requires-Dist: pytest>=8.3.5
21
+ Requires-Dist: tensorflow>=2.19.0
22
+ Requires-Dist: matplotlib>=3.10.1
23
+ Requires-Dist: vnnlib>=0.0.1
24
+ Requires-Dist: gurobipy>=12.0.3
25
+ Dynamic: license-file
26
+
27
+ # Overview
28
+
29
+ The ONNXGurobi is a Python library that creates Gurobi models for neural networks in ONNX format.
30
+
31
+ The library has been designed to allow easy extensions, and it currently supports the following ONNX nodes:
32
+
33
+ - Add
34
+ - AveragePool
35
+ - BatchNormalization
36
+ - Concat
37
+ - Conv
38
+ - Div
39
+ - Dropout
40
+ - Flatten
41
+ - Gemm
42
+ - Identity
43
+ - MatMul
44
+ - MaxPool
45
+ - Mul
46
+ - Relu
47
+ - Reshape
48
+ - Sub
49
+ - Unsqueeze
50
+
51
+
52
+ # Installation
53
+
54
+ We highly recommend creating a virtual conda environment and installing the library within the environment by following these steps:
55
+
56
+ 1- Gurobi is not installed automatically. Please install it manually using:
57
+ ```
58
+ conda install -c gurobi gurobi
59
+ ```
60
+ 2- Make sure to switch to Python 3.11 inside your environment using:
61
+ ```
62
+ conda install python=3.11
63
+ ```
64
+
65
+ 3- Install the library using:
66
+ ```
67
+ pip install onnxgurobi
68
+ ```
69
+
70
+ * Probably a known fact, but the Gurobi optimizer needs to be manually
71
+ installed to the platform and a valid license has to be activated.
72
+ * As of the end of 2025, Gurobi has academic licenses available - at no cost -
73
+ to students, faculty, and staff at accredited degree-granting institutions.
74
+ # Getting Started
75
+
76
+ The ```ONNXToGurobi``` class provides the central interface for converting an ONNX model into a Gurobi optimization model.
77
+
78
+ To get access to the class's methods and attributes, you need to import it using:
79
+
80
+ ```
81
+ from onnx_to_gurobi.onnxToGurobi import ONNXToGurobi
82
+ ```
83
+
84
+
85
+ The ```ONNXToGurobi``` class:
86
+
87
+ - Parses the ONNX graph and constructs an internal representation of each operator and its corresponding tensor shapes.
88
+
89
+ - Creates a Gurobi model along with the necessary variables and constraints.
90
+
91
+ - Exposes all model components (decision variables, Gurobi Model object, node definitions, tensor shapes), allowing you to:
92
+
93
+ * Set or fix input variables to specific values.
94
+
95
+ * Introduce objectives.
96
+
97
+ * Add your own constraints.
98
+
99
+ * Solve the resulting MILP and then inspect or extract the outputs from the solution.
100
+
101
+
102
+ An overview of the class’s methods and attributes:
103
+
104
+ ```
105
+ class ONNXToGurobi:
106
+ def build_model(self):
107
+ """
108
+ Constructs the Gurobi model by creating variables and applying operator constraints.
109
+
110
+ """
111
+
112
+ def get_gurobi_model(self):
113
+ """
114
+ Retrieves the Gurobi model after all constraints have been added.
115
+
116
+ Returns:
117
+ gurobipy.Model: The constructed Gurobi model reflecting the ONNX graph.
118
+ """
119
+
120
+ # Attributes:
121
+ self.model # The Gurobi Model object
122
+ self.variables # A dict mapping tensor names to Gurobi variables (or constants)
123
+ self.in_out_tensors_shapes # Shapes of all input and output tensors
124
+ self.nodes # Node definitions parsed from ONNX
125
+ self.initializers # Constant tensors extracted from the ONNX graph
126
+
127
+ ```
128
+
129
+ # How to Use
130
+
131
+ See [example1.py](./examples/example1.py) for a simple example.
132
+ See [example2.py](./examples/example2.py) or [example3.py](./examples/example3.py) for more detailed adversarial examples.
133
+ See [example4.py](./examples/example4.py) for the usage of the model builder, which is described down below.
134
+
135
+ ## Compatibility
136
+ To get access to the following compatibility checks, you need to import them by:
137
+ ```
138
+ from onnx_to_gurobi.compatibility import compatibility_check, get_unsupported_note_types, check_equivalence
139
+ ```
140
+
141
+ The compatibility.py module was implemented as an interface for checking ONNXGurobi compatibility for an onnx model.
142
+ There are mainly three methods that can be used to check if an ONNX model can be parsed to a Gurobi model.
143
+
144
+ 1. For a comprehensive review, it is recommended to use the ```compatibility_check``` method. The specified ONNX file can be stored in standard .onnx or compressed .onnx.gz format and can be checked as follows:
145
+ ```
146
+ def compatibility_check(onnx_path, iterative_analysis=True, output_dir=None, save_subgraphs=True, rtol=1e-05, atol=1e-08):
147
+ """
148
+ This method is used for testing compatibility with ONNXToGurobi. Tries to parse specified ONNX model to a Gurobi model.
149
+ If successful, equivalence of an ONNX run and its corresponding Gurobi model is checked (with rtol/atol tolerance).
150
+
151
+ If not successful, an iterative analysis can be started via function argument flag. If true, every subgraph is
152
+ extracted and checked for compatibility with ONNXGurobi. This is done to identify nodes which cause misconduct,
153
+ which might not be solely attributable to the node type itself.
154
+
155
+ Args:
156
+ onnx_path: path to the initial onnx model.
157
+ iterative_analysis: flag to start iterative analysis, if initial check fails.
158
+ output_dir: name of output directory where subgraphs and log file can be stored.
159
+ save_subgraphs: flag to control, if every subgraph should be stored or be deleted after successful check.
160
+ rtol/atol: tolerances for the equivalence check (via np.allclose)
161
+ """
162
+ ```
163
+ As ONNXGurobi expects a dynamic batch dimension, the compatibility check includes adding a dynamic batch dimension and
164
+ restoring the ONNX model as '*_modified.onnx' in the directory of onnx path. If initial compatibility check fails and
165
+ iterative analysis should be done, a new directory 'subgraphs' is created at the given output path, where
166
+ the subgraphs are stored. At the same directory, a logfile.txt is written including following information for each subgraph:
167
+ ```
168
+ [NODE *]: NAME_OF_NODE
169
+ [PASSED]: Extracting current subgraph was successful, stored at 'path/of/stored/onnx_subgraph_endingnode.onnx'.
170
+ [PASSED]: Compatibility. # Indicating no error occured while parsing
171
+ [PASSED]: Equivalence check for ('path/of/stored/onnx_subgraph_endingnode.onnx') has been successful.
172
+ ```
173
+ Via the logfile the node causing misconduct can be found and its most likely reason for incompatibility.
174
+ Notes:
175
+ * If extracting current subgraph fails, there might be an error using 'onnx.utils.extract_model' being independent of ONNXToGurobi.
176
+ * It was observed that equivalence check might fail because Gurobi doesn't find a valid solution (status code -3).
177
+ A restart can sometimes still show equivalence before using the following function.
178
+
179
+ 2. For a quick check of whether all nodes included in the onnx model are supported by ONNXGurobi, the ```get_unsupported_note_types```
180
+ method can be used.
181
+ ```
182
+ def get_unsupported_note_types(onnx_path):
183
+ """
184
+ Checks every node type, included in onnx path, and checks for support by the current ONNXGurobi version.
185
+ Prints and returns a list of all node types that are not supported and most likely cause incompatibility.
186
+
187
+ Args:
188
+ onnx_path: path to the onnx file.
189
+ """
190
+ ```
191
+ If the returned/printed list isn't empty, included node types are not supported by the current version.
192
+
193
+ 3. Via ```check_equivalence```, a manual compatibility test can be set up.
194
+ Therefore it is very important to specify the input tensor, its shape and name, as well as the output tensor, correctly. In addition, this method does not include adding a dynamic batch dimension to the onnx file. Therefore, this must be done and checked manually.
195
+ ```
196
+ check_equivalence(onnx_path, model_input, model_input_names, target_output_names, log_file_path, rtol=1e-05, atol=1e-08):
197
+ """
198
+ Runs onnx inference and ONNXToGurobi solving for given onnx model.
199
+
200
+ Args:
201
+ onnx_path: path to the onnx model.
202
+ model_input: tensor of valid input shape of specified onnx model.
203
+ model_input_names: corresponding names of model input tensor.
204
+ target_output_names: names of the output tensors from the onnx model.
205
+ ... (remaining args are similar to the above)
206
+
207
+ Returns:
208
+ True: If check has shown compatibility and equivalence for ONNX/Gurobi run on same input instance.
209
+ False: If check has shown compatibility but missing equivalence between both outputs.
210
+ None: If check has shown incompatibility, leading to an exception (most likely written to the logfile)
211
+ """
212
+ ```
213
+ Please ensure the model has a dynamic batch dimension and input/output tensors have a shape of [1, remaining dimensions].
214
+
215
+ ### Additional Notes for compatibility checks:
216
+ * All compatibility checks mentioned are designed so that the inputs expect only one tensor. The number of output tensors can be greater.
217
+ * Since most parsers are designed to exclude the batch dimension, the checks are designed to have a dynamic batch dimension at axis 0. If the ONNX model expects 1D inputs, please re-export with an additional batch dimension. If the model has input dimension >1, the first dimension is interpreted as batch dimension by the aforementioned 'compatibility_check'.
218
+ * While testing aspects of the ONNXGurobi library, it has been observed that, from a certain version onwards, PyTorch might ignore the onnx op version flag while exporting the onnx file. It is essential to ensure that the ONNX file is in opset version 11.
219
+ * In addition, for ensuring things like the modified onnx file has a dynamic batch dimension, correct opset version or correct input/output names, we recommend [Netron](https://netron.app/) as a visualization tool.
220
+
221
+ ## Model Builder
222
+ In addition to using existing ONNX models, a model builder was explicitly implemented for PyTorch. At the current state
223
+ it is only capable of creating a simple fully connected neural network, consisting of nn.Linear and nn.ReLU layers.
224
+ In this way, a network can be created from the aforementioned layer types, which is compatible for the use of the ONNXGurobi library.
225
+ The following import is required for usage:
226
+
227
+ ```
228
+ from onnx_to_gurobi.modelBuilder import SimpleFCModel
229
+ ```
230
+ The use of the class ```SimpleFCModel``` is described below:
231
+ #### 1. Create instance of ```SimpleFCModel```:
232
+ ```
233
+ class SimpleFCModel(nn.Module):
234
+ def __init__(input_dim, hidden_dim, output_dim, output_activation, onnx_path = "simple_fc_model.onnx"):
235
+ """
236
+ Args:
237
+ input_dim: (1D) input dimension for the network (integer/list)
238
+ hidden_dim: list of dimensions for each hidden layer
239
+ output_dim: output dimension of the network
240
+ output_activation: nn.* activation function for the output layer
241
+ onnx_path: path for the onnx export
242
+ """
243
+ super().__init__()
244
+ # Setting attributes
245
+ ...
246
+
247
+ # Creating PyTorch and (ONNX)Gurobi model
248
+ self.model = self.create_model()
249
+ self.gurobi_model_builder = self.update_gurobi_model_builder()
250
+ ```
251
+ As described above, for every dimension given in the 'hidden_dims' argument a Linear + ReLU layer is added with corresponding dimension.
252
+ If no explicit activation function is given, also a ReLU activation is used for the output layer. As shown, the constructor also builds the corresponding PyTorch and Gurobi model.
253
+
254
+ Note: To export the model in ONNX format you can use the ```self.export_onnx()``` method. It exports an ONNX version of the current state and also adds a dynamic batch dimension, since this is expected by the ONNXGurobi library.
255
+
256
+ #### 2. PyTorch model via ```create_model()```:
257
+ ```
258
+ def create_model(self):
259
+ """
260
+ Called by constructor when the model is created. For each entry in self.hidden_dims a fc linear + relu layer
261
+ is added with corresponding input dimensions of the previous layer.
262
+
263
+ Returns: nn.Sequential module consisting of fc linear + relu layers
264
+ """
265
+ ```
266
+ The PyTorch model can be accessed with ```simple_fc_model.get_torch_model()```
267
+
268
+ Aside from that the class also has a train method which can be used for basic optimization.
269
+ But for more complex training, manually optimizing and reloading the optimized parameters into the ```SimpleFCModel``` instance is recommended.
270
+ ```
271
+ def train_model(self, dataloader, optimizer, loss_fn, epochs, device="cpu"):
272
+ """
273
+ Performs (basic) optimization iterations with given arguments.
274
+
275
+ Args:
276
+ dataloader: torch dataloader containing batch inputs and ground truth outputs.
277
+ optimizer: optimizer used for training network parameters.
278
+ loss_fn: optimization criteria used for computing loss on current predictions.
279
+ epochs: number of optimization iterations.
280
+ device: device where the optimization is performed.
281
+ """
282
+ ...
283
+ ```
284
+ Note: If you want to use the internal Gurobi model, the train method described above also calls ```update_gurobi_model()```, which is described in the following.
285
+ If you had optimized and reloaded the model parameters and still want to use the internal Gurobi model, always call ```update_gurobi_model()``` manually.
286
+ Otherwise the Gurobi model is not updated with the new parameters.
287
+
288
+ #### 3. Internal ONNXToGurobi model
289
+ The ```self.gurobi_model_builder``` is an instance of the ```ONNXToGurobi``` class, which was described earlier in this guide.
290
+ The attribute always stores an ONNXGurobi instance of the most recent state of the network.
291
+ To be consistent the ```update_gurobi_model()``` should be called, whenever changes to model parameters are made.
292
+
293
+ The Gurobi model can be accessed via:
294
+ ```
295
+ gurobi_model = simple_fc_model.gurobi_model_builder.get_gurobi_model()
296
+
297
+ gurobi_input_vars = simple_fc_model.get_gurobi_input_vars()
298
+ gurobi_output_vars = simple_fc_model.get_gurobi_output_vars()
299
+
300
+ # Manually assign input assignments and output constraints
301
+ ...
302
+ ```
303
+ Required information like input names etc. can be accessed directly from the ```gurobi_model_builder```. You may want to retrieve the internal Gurobi model without having every change or additional constraint stored in the network's Gurobi model.
304
+ Therefore, use ```gurobi_model.copy()``` to create a new object.
305
+
306
+ In addition to that, the method ```get_gurobi_with_input_assignment(...)``` was implemented to get a copy of the current internal Gurobi model with additional (basic) input assignment:
307
+ ```
308
+ def get_gurobi_with_input_assignment(self, input_data, eps=0.0):
309
+ """
310
+ Can be used to simply assign an input tensor to its corresponding input variables of the internal Gurobi model.
311
+ The assignment restricts the input variables to an epsilon environment of the respective, specific input value.
312
+ So for each input variable, constraints are added, representing input - eps <= input_var <= input + eps.
313
+
314
+ Args:
315
+ input_data: tensor holding the specific input values
316
+ eps: threshold to define the width of the eps. environment around specific input value
317
+
318
+ Returns:
319
+ gurobi_model: A copy! of the current Gurobi instance, supplemented by the input constraints described above.
320
+ """
321
+ ```
322
+ Note: Since this method returns a new instance, every change or new constraint made to the returned gurobi model will not further affect the internal Gurobi model of the ```SimpleFCModel``` instance!
323
+
324
+ #### 4. Usage of ```SimpleFCModel```
325
+ Although [example4.py](./examples/example4.py) already shows the use of the class, a small example follows to illustrate a typical process:
326
+ ```
327
+ simple_fc_model = SimpleFCModel(
328
+ input_dim=32,
329
+ hidden_dim=[128, 64],
330
+ output_dim=10,
331
+ output_activation=nn.ReLU(),
332
+ onnx_path="dummy_networks.onnx"
333
+ )
334
+ torch_model = simple_fc_model.get_torch_model()
335
+
336
+ # In case you might manually optimize the network extern, reload the weights and
337
+ # manually update the internal Gurobi model builder!
338
+ torch.save(torch_model.state_dict(), "path_to_optimized_weights.pth")
339
+ # ...
340
+ torch_model.load_state_dict(torch.load("path_to_optimized_weights.pth", weights_only=True))
341
+ _ = simple_fc_model.update_gurobi_model_builder()
342
+
343
+ # Generate dummy input with additional batch dimension!
344
+ dummy_input = torch.randn([1] + simple_fc_model.input_dim)
345
+
346
+ gurobi_input_vars = simple_fc_model.get_gurobi_input_vars()
347
+ gurobi_output_vars = simple_fc_model.get_gurobi_output_vars()
348
+ gurobi_model = simple_fc_model.get_gurobi_with_input_assignment(dummy_input, eps=1e-4)
349
+
350
+ for idx, output_var in gurobi_output_vars.items():
351
+ # Since gurobi model is a new instance, accessing vars is done by the names, since they are unchanged
352
+ var_name = output_var.VarName
353
+ var = gurobi_model.getVarByName(var_name)
354
+
355
+ gurobi_model.addConstr(
356
+ var >= 0,
357
+ name="just_a_dummy_constraint"
358
+ )
359
+
360
+ gurobi_model.optimize()
361
+ ```
362
+
363
+ # Important Notes
364
+
365
+ * Make sure your model is exported into ONNX using opset version 11.
366
+ * The library doesn't support recurrent neural networks (RNNs).
367
+ * The 3-D convolutional operation isn't supported.
368
+ * If an initializer/constant (e.g. weights) is used as an input to the MatMul, the node expects it to be the second input.
369
+ * The Concat node’s output must match the input shape of the following layer. In addition, the node expects only 2 inputs.
370
+ * Since our library is designed solely for production and not for training, we encode the Dropout node to
371
+ function only in inference mode, which means that its input passes through unchanged.