bigdl-core-npu 2.6.0b20250114__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (234)
  1. bigdl-core-npu/__init__.py +0 -0
  2. bigdl-core-npu/include/common.h +96 -0
  3. bigdl-core-npu/include/npu_llm.h +74 -0
  4. bigdl-core-npu/npu_llm.dll +0 -0
  5. bigdl-core-npu/npu_llm.lib +0 -0
  6. bigdl_core_npu-2.6.0b20250114.dist-info/METADATA +44 -0
  7. bigdl_core_npu-2.6.0b20250114.dist-info/RECORD +234 -0
  8. bigdl_core_npu-2.6.0b20250114.dist-info/WHEEL +5 -0
  9. bigdl_core_npu-2.6.0b20250114.dist-info/top_level.txt +2 -0
  10. intel_npu_acceleration_library/__init__.py +24 -0
  11. intel_npu_acceleration_library/_version.py +6 -0
  12. intel_npu_acceleration_library/backend/__init__.py +37 -0
  13. intel_npu_acceleration_library/backend/base.py +250 -0
  14. intel_npu_acceleration_library/backend/bindings.py +383 -0
  15. intel_npu_acceleration_library/backend/compression.py +24 -0
  16. intel_npu_acceleration_library/backend/convolution.py +58 -0
  17. intel_npu_acceleration_library/backend/factory.py +1161 -0
  18. intel_npu_acceleration_library/backend/linear.py +60 -0
  19. intel_npu_acceleration_library/backend/matmul.py +59 -0
  20. intel_npu_acceleration_library/backend/mlp.py +58 -0
  21. intel_npu_acceleration_library/backend/ops.py +142 -0
  22. intel_npu_acceleration_library/backend/qlinear.py +75 -0
  23. intel_npu_acceleration_library/backend/qmatmul.py +66 -0
  24. intel_npu_acceleration_library/backend/runtime.py +215 -0
  25. intel_npu_acceleration_library/backend/sdpa.py +107 -0
  26. intel_npu_acceleration_library/backend/tensor.py +1120 -0
  27. intel_npu_acceleration_library/backend/utils.py +70 -0
  28. intel_npu_acceleration_library/compiler.py +194 -0
  29. intel_npu_acceleration_library/device.py +230 -0
  30. intel_npu_acceleration_library/dtypes.py +155 -0
  31. intel_npu_acceleration_library/external/openvino/__init__.py +72 -0
  32. intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +21 -0
  33. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
  34. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
  35. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
  36. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
  37. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
  38. intel_npu_acceleration_library/external/openvino/experimental/__init__.py +14 -0
  39. intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
  40. intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
  41. intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
  42. intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +293 -0
  43. intel_npu_acceleration_library/external/openvino/frontend/jax/passes.py +65 -0
  44. intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +182 -0
  45. intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
  46. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
  47. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
  48. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
  49. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
  50. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
  51. intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
  52. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
  53. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
  54. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
  55. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
  56. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
  57. intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
  58. intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +370 -0
  59. intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +180 -0
  60. intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
  61. intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +118 -0
  62. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
  63. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
  64. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
  65. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
  66. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
  67. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +131 -0
  68. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
  69. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
  70. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
  71. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
  72. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +290 -0
  73. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +126 -0
  74. intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +568 -0
  75. intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +258 -0
  76. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
  77. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
  78. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
  79. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
  80. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
  81. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
  82. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
  83. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
  84. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +481 -0
  85. intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
  86. intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
  87. intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
  88. intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +28 -0
  89. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
  90. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
  91. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +5 -0
  92. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
  93. intel_npu_acceleration_library/external/openvino/properties/__init__.py +22 -0
  94. intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
  95. intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
  96. intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
  97. intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
  98. intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
  99. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
  100. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
  101. intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
  102. intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
  103. intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
  104. intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
  105. intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
  106. intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +19 -0
  107. intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
  108. intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
  109. intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3068 -0
  110. intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
  111. intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
  112. intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
  113. intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
  114. intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
  115. intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
  116. intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
  117. intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +398 -0
  118. intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
  119. intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
  120. intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +17 -0
  121. intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +276 -0
  122. intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
  123. intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
  124. intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
  125. intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
  126. intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
  127. intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
  128. intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
  129. intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
  130. intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
  131. intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +215 -0
  132. intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
  133. intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
  134. intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
  135. intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +787 -0
  136. intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
  137. intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
  138. intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
  139. intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
  140. intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
  141. intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
  142. intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +40 -0
  143. intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
  144. intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
  145. intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
  146. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
  147. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +447 -0
  148. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
  149. intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +156 -0
  150. intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
  151. intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
  152. intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
  153. intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
  154. intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
  155. intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
  156. intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
  157. intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
  158. intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
  159. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
  160. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
  161. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
  162. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
  163. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
  164. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
  165. intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
  166. intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
  167. intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
  168. intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
  169. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
  170. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +550 -0
  171. intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
  172. intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
  173. intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
  174. intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
  175. intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
  176. intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +40 -0
  177. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
  178. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
  179. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
  180. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
  181. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
  182. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
  183. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
  184. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
  185. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
  186. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +298 -0
  187. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
  188. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +214 -0
  189. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
  190. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
  191. intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
  192. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
  193. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
  194. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
  195. intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +196 -0
  196. intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
  197. intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
  198. intel_npu_acceleration_library/external/openvino/utils.py +115 -0
  199. intel_npu_acceleration_library/functional/__init__.py +8 -0
  200. intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
  201. intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
  202. intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
  203. intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
  204. intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
  205. intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
  206. intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
  207. intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
  208. intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
  209. intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
  210. intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
  211. intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
  212. intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
  213. intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
  214. intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
  215. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
  216. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
  217. intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
  218. intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
  219. intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
  220. intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
  221. intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
  222. intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
  223. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
  224. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
  225. intel_npu_acceleration_library/modelling.py +150 -0
  226. intel_npu_acceleration_library/nn/__init__.py +20 -0
  227. intel_npu_acceleration_library/nn/autograd.py +68 -0
  228. intel_npu_acceleration_library/nn/conv.py +257 -0
  229. intel_npu_acceleration_library/nn/functional.py +1207 -0
  230. intel_npu_acceleration_library/nn/linear.py +162 -0
  231. intel_npu_acceleration_library/nn/llm.py +417 -0
  232. intel_npu_acceleration_library/nn/module.py +393 -0
  233. intel_npu_acceleration_library/optimizations.py +157 -0
  234. intel_npu_acceleration_library/quantization.py +174 -0
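The listing shows that the wheel ships the native npu_llm runtime (npu_llm.dll/.lib plus headers) alongside a vendored copy of intel_npu_acceleration_library, which in turn bundles its own OpenVINO Python API and plugin DLLs under lib/Release. As a rough orientation only, the sketch below shows how such a wheel is typically exercised; it follows the upstream intel_npu_acceleration_library API (the compile entry point and its dtype argument are assumptions based on the upstream project, not verified against this build, and torch is not part of this wheel):

# Hypothetical usage sketch, not taken from this diff.
import torch
import intel_npu_acceleration_library as npu_lib  # top-level package listed above

# A small torch model to offload; any float model would do.
model = torch.nn.Sequential(
    torch.nn.Linear(256, 512),
    torch.nn.ReLU(),
    torch.nn.Linear(512, 10),
).eval()

# Assumed upstream-style API: compile the model so supported layers run on the Intel NPU.
npu_model = npu_lib.compile(model, dtype=torch.float16)

with torch.no_grad():
    out = npu_model(torch.randn(1, 256))
print(out.shape)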
intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py
@@ -0,0 +1,638 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+"""Factory functions for all openvino ops."""
+from typing import Callable, Iterable, List, Optional, Set, Union
+
+import numpy as np
+from functools import partial
+
+from openvino.runtime import Node, Shape
+from openvino.runtime.op import Constant, Parameter
+from openvino.runtime.opset_utils import _get_node_factory
+from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op
+from openvino.runtime.utils.input_validation import (
+    assert_list_of_ints,
+    check_valid_attributes,
+    is_non_negative_value,
+    is_positive_value,
+)
+from openvino.runtime.utils.node_factory import NodeFactory
+from openvino.runtime.utils.types import (
+    NodeInput,
+    NumericData,
+    NumericType,
+    ScalarData,
+    TensorShape,
+    as_node,
+    as_nodes,
+    get_dtype,
+    get_element_type,
+    get_element_type_str,
+    make_constant_node,
+)
+
+_get_node_factory_opset3 = partial(_get_node_factory, "opset3")
+
+# -------------------------------------------- ops ------------------------------------------------
+
+
+@nameable_op
+def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
+    """Return a node which produces the Assign operation.
+
+    :param new_value: Node producing a value to be assigned to a variable.
+    :param variable_id: Id of a variable to be updated.
+    :param name: Optional name for output node.
+    :return: Assign node
+    """
+    return _get_node_factory_opset3().create(
+        "Assign",
+        [as_node(new_value, name=name)],
+        {"variable_id": variable_id},
+    )
+
+
+@nameable_op
+def broadcast(
+    data: NodeInput,
+    target_shape: NodeInput,
+    axes_mapping: Optional[NodeInput] = None,
+    broadcast_spec: str = "NUMPY",
+    name: Optional[str] = None,
+) -> Node:
+    """Create a node which broadcasts the input node's values along specified axes to a desired shape.
+
+    :param data: The node with input tensor data.
+    :param target_shape: The node with a new shape we want to broadcast tensor to.
+    :param axes_mapping: The node with axis positions (0-based) in the result
+                         that are being broadcast.
+    :param broadcast_spec: The type of broadcasting that specifies mapping of input tensor axes
+                           to output shape axes. Range of values: NUMPY, EXPLICIT, BIDIRECTIONAL.
+    :param name: Optional new name for output node.
+    :return: New node with broadcast shape.
+    """
+    inputs = as_nodes(data, target_shape, name=name)
+    if broadcast_spec.upper() == "EXPLICIT":
+        inputs.append(as_node(axes_mapping, name=name))
+    return _get_node_factory_opset3().create(
+        "Broadcast",
+        inputs,
+        {"mode": broadcast_spec.upper()},
+    )
+
+
+@nameable_op
+def bucketize(
+    data: Node,
+    buckets: NodeInput,
+    output_type: str = "i64",
+    with_right_bound: bool = True,
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which produces the Bucketize operation.
+
+    :param data: Input data to bucketize
+    :param buckets: 1-D of sorted unique boundaries for buckets
+    :param output_type: Output tensor type, "i64" or "i32", defaults to i64
+    :param with_right_bound: indicates whether bucket includes the right or left
+                             edge of interval. default true = includes right edge
+    :param name: Optional name for output node.
+    :return: Bucketize node
+    """
+    return _get_node_factory_opset3().create(
+        "Bucketize",
+        [data, as_node(buckets, name=name)],
+        {"output_type": output_type, "with_right_bound": with_right_bound},
+    )
+
+
+@nameable_op
+def cum_sum(
+    arg: NodeInput,
+    axis: NodeInput,
+    exclusive: bool = False,
+    reverse: bool = False,
+    name: Optional[str] = None,
+) -> Node:
+    """Construct a cumulative summation operation.
+
+    :param arg: The tensor to be summed.
+    :param axis: zero dimension tensor specifying axis position along which sum will be performed.
+    :param exclusive: if set to true, the top element is not included
+    :param reverse: if set to true, will perform the sums in reverse direction
+    :return: New node performing the operation
+    """
+    return _get_node_factory_opset3().create(
+        "CumSum",
+        as_nodes(arg, axis, name=name),
+        {"exclusive": exclusive, "reverse": reverse},
+    )
+
+
+@nameable_op
+def embedding_bag_offsets_sum(
+    emb_table: Node,
+    indices: NodeInput,
+    offsets: NodeInput,
+    default_index: Optional[NodeInput] = None,
+    per_sample_weights: Optional[NodeInput] = None,
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which performs sums of bags of embeddings without the intermediate embeddings.
+
+    :param emb_table: Tensor containing the embedding lookup table.
+    :param indices: Tensor with indices.
+    :param offsets: Tensor containing the starting index positions of each bag in indices.
+    :param per_sample_weights: Tensor with weights for each sample.
+    :param default_index: Scalar containing default index in embedding table to fill empty bags.
+    :param name: Optional name for output node.
+    :return: The new node which performs EmbeddingBagOffsetsSum
+    """
+    inputs = [emb_table, as_node(indices, name=name), as_node(offsets, name=name)]
+    if per_sample_weights is not None:
+        inputs.append(default_index)
+        inputs.append(per_sample_weights)
+    elif default_index is not None:
+        inputs.append(default_index)
+
+    return _get_node_factory_opset3().create("EmbeddingBagOffsetsSum", inputs, {})
+
+
+@nameable_op
+def embedding_bag_packed_sum(
+    emb_table: NodeInput,
+    indices: NodeInput,
+    per_sample_weights: Optional[NodeInput] = None,
+    name: Optional[str] = None,
+) -> Node:
+    """Return an EmbeddingBagPackedSum node.
+
+    EmbeddingBagPackedSum constructs an output tensor by replacing every index in a given
+    input tensor with a row (from the weights matrix) at that index
+
+    :param emb_table: Tensor containing the embedding lookup table.
+    :param indices: Tensor with indices.
+    :param per_sample_weights: Weights to be multiplied with embedding table.
+    :param name: Optional name for output node.
+    :return: EmbeddingBagPackedSum node
+    """
+    inputs = [as_node(emb_table, name=name), as_node(indices, name=name)]
+    if per_sample_weights is not None:
+        inputs.append(as_node(per_sample_weights, name=name))
+
+    return _get_node_factory_opset3().create("EmbeddingBagPackedSum", inputs, {})
+
+
+@nameable_op
+def embedding_segments_sum(
+    emb_table: Node,
+    indices: NodeInput,
+    segment_ids: NodeInput,
+    num_segments: Optional[NodeInput] = None,
+    default_index: Optional[NodeInput] = None,
+    per_sample_weights: Optional[NodeInput] = None,
+    name: Optional[str] = None,
+) -> Node:
+    """Return an EmbeddingSegmentsSum node.
+
+    EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given
+    input tensor with a row (from the weights matrix) at that index
+
+    :param emb_table: Tensor containing the embedding lookup table.
+    :param indices: Tensor with indices.
+    :param segment_ids: Tensor with indices into the output Tensor
+    :param num_segments: Tensor with number of segments.
+    :param default_index: Scalar containing default index in embedding table to fill empty bags.
+    :param per_sample_weights: Weights to be multiplied with embedding table.
+    :param name: Optional name for output node.
+    :return: EmbeddingSegmentsSum node
+    """
+    inputs = [as_node(emb_table, name=name), as_node(indices, name=name), as_node(segment_ids, name=name)]
+    if per_sample_weights is not None:
+        inputs.append(as_node(num_segments, name=name))
+        inputs.append(as_node(default_index, name=name))
+        inputs.append(as_node(per_sample_weights, name=name))
+    elif default_index is not None:
+        inputs.append(as_node(num_segments, name=name))
+        inputs.append(as_node(default_index, name=name))
+    elif num_segments is not None:
+        inputs.append(as_node(num_segments, name=name))
+
+    return _get_node_factory_opset3().create("EmbeddingSegmentsSum", inputs, {})
+
+
+@nameable_op
+def extract_image_patches(
+    image: NodeInput,
+    sizes: TensorShape,
+    strides: List[int],
+    rates: TensorShape,
+    auto_pad: str,
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which produces the ExtractImagePatches operation.
+
+    :param image: 4-D Input data to extract image patches.
+    :param sizes: Patch size in the format of [size_rows, size_cols].
+    :param strides: Patch movement stride in the format of [stride_rows, stride_cols]
+    :param rates: Element selection rate for creating a patch.
+    :param auto_pad: Padding type.
+    :param name: Optional name for output node.
+    :return: ExtractImagePatches node
+    """
+    return _get_node_factory_opset3().create(
+        "ExtractImagePatches",
+        [as_node(image, name=name)],
+        {"sizes": sizes, "strides": strides, "rates": rates, "auto_pad": auto_pad},
+    )
+
+
+@nameable_op
+def gru_cell(
+    X: NodeInput,
+    initial_hidden_state: NodeInput,
+    W: NodeInput,
+    R: NodeInput,
+    B: NodeInput,
+    hidden_size: int,
+    activations: Optional[List[str]] = None,
+    activations_alpha: Optional[List[float]] = None,
+    activations_beta: Optional[List[float]] = None,
+    clip: float = 0.0,
+    linear_before_reset: bool = False,
+    name: Optional[str] = None,
+) -> Node:
+    """Perform GRUCell operation on the tensor from input node.
+
+    GRUCell represents a single GRU Cell that computes the output
+    using the formula described in the paper: https://arxiv.org/abs/1406.1078
+
+    Note this class represents only single *cell* and not whole *layer*.
+
+    :param X: The input tensor with shape: [batch_size, input_size].
+    :param initial_hidden_state: The hidden state tensor at current time step with shape:
+                                 [batch_size, hidden_size].
+    :param W: The weights for matrix multiplication, gate order: zrh.
+              Shape: [3*hidden_size, input_size].
+    :param R: The recurrence weights for matrix multiplication.
+              Shape: [3*hidden_size, hidden_size].
+    :param B: The sum of biases (weight and recurrence).
+              For linear_before_reset set True the shape is [4*hidden_size].
+              Otherwise the shape is [3*hidden_size].
+    :param hidden_size: The number of hidden units for recurrent cell.
+                        Specifies hidden state size.
+    :param activations: The vector of activation functions used inside recurrent cell.
+    :param activations_alpha: The vector of alpha parameters for activation functions in
+                              order respective to activation list.
+    :param activations_beta: The vector of beta parameters for activation functions in order
+                             respective to activation list.
+    :param clip: The value defining clipping range [-clip, clip] on input of
+                 activation functions.
+    :param linear_before_reset: Flag denotes if the layer behaves according to the modification
+                                of GRUCell described in the formula in the ONNX documentation.
+    :param name: Optional output node name.
+    :return: The new node performing a GRUCell operation on tensor from input node.
+    """
+    if activations is None:
+        activations = ["sigmoid", "tanh"]
+    if activations_alpha is None:
+        activations_alpha = []
+    if activations_beta is None:
+        activations_beta = []
+
+    input_nodes = as_nodes(X, initial_hidden_state, W, R, B, name=name)
+    attributes = {
+        "hidden_size": hidden_size,
+        "activations": activations,
+        "activations_alpha": activations_alpha,
+        "activations_beta": activations_beta,
+        "linear_before_reset": linear_before_reset,
+        "clip": clip,
+    }
+    return _get_node_factory_opset3().create("GRUCell", input_nodes, attributes)
+
+
+@nameable_op
+def non_max_suppression(
+    boxes: NodeInput,
+    scores: NodeInput,
+    max_output_boxes_per_class: Optional[NodeInput] = None,
+    iou_threshold: Optional[NodeInput] = None,
+    score_threshold: Optional[NodeInput] = None,
+    box_encoding: str = "corner",
+    sort_result_descending: bool = True,
+    output_type: str = "i64",
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which performs NonMaxSuppression.
+
+    :param boxes: Tensor with box coordinates.
+    :param scores: Tensor with box scores.
+    :param max_output_boxes_per_class: Tensor specifying maximum number of boxes
+                                       to be selected per class.
+    :param iou_threshold: Tensor specifying intersection over union threshold
+    :param score_threshold: Tensor specifying minimum score to consider box for the processing.
+    :param box_encoding: Format of boxes data encoding.
+    :param sort_result_descending: Flag that specifies whether it is necessary to sort selected
+                                   boxes across batches or not.
+    :param output_type: Output element type.
+    :return: The new node which performs NonMaxSuppression
+    """
+    if max_output_boxes_per_class is None:
+        max_output_boxes_per_class = make_constant_node(0, np.int64)
+    if iou_threshold is None:
+        iou_threshold = make_constant_node(0, np.float32)
+    if score_threshold is None:
+        score_threshold = make_constant_node(0, np.float32)
+
+    inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, name=name)
+    attributes = {
+        "box_encoding": box_encoding,
+        "sort_result_descending": sort_result_descending,
+        "output_type": output_type,
+    }
+
+    return _get_node_factory_opset3().create("NonMaxSuppression", inputs, attributes)
+
+
+@nameable_op
+def non_zero(data: NodeInput, output_type: str = "i64", name: Optional[str] = None) -> Node:
+    """Return the indices of the elements that are non-zero.
+
+    :param data: Input data.
+    :param output_type: Output tensor type.
+
+    :return: The new node which performs NonZero
+    """
+    return _get_node_factory_opset3().create(
+        "NonZero",
+        [as_node(data, name=name)],
+        {"output_type": output_type},
+    )
+
+
+@nameable_op
+def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
+    """Return a node which produces the ReadValue operation.
+
+    :param init_value: Node producing a value to be returned instead of an unassigned variable.
+    :param variable_id: Id of a variable to be read.
+    :param name: Optional name for output node.
+    :return: ReadValue node
+    """
+    return _get_node_factory_opset3().create(
+        "ReadValue",
+        [as_node(init_value, name=name)],
+        {"variable_id": variable_id},
+    )
+
+
+@nameable_op
+def rnn_cell(
+    X: NodeInput,
+    initial_hidden_state: NodeInput,
+    W: NodeInput,
+    R: NodeInput,
+    B: NodeInput,
+    hidden_size: int,
+    activations: List[str],
+    activations_alpha: List[float],
+    activations_beta: List[float],
+    clip: float = 0.0,
+    name: Optional[str] = None,
+) -> Node:
+    """Perform RNNCell operation on tensor from input node.
+
+    It follows notation and equations defined as in ONNX standard:
+    https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN
+
+    Note this class represents only single *cell* and not whole RNN *layer*.
+
+    :param X: The input tensor with shape: [batch_size, input_size].
+    :param initial_hidden_state: The hidden state tensor at current time step with shape:
+                                 [batch_size, hidden_size].
+    :param W: The weight tensor with shape: [hidden_size, input_size].
+    :param R: The recurrence weight tensor with shape: [hidden_size,
+              hidden_size].
+    :param B: The sum of biases (weight and recurrence) with shape: [hidden_size].
+    :param hidden_size: The number of hidden units for recurrent cell.
+                        Specifies hidden state size.
+    :param activations: The vector of activation functions used inside recurrent cell.
+    :param activations_alpha: The vector of alpha parameters for activation functions in
+                              order respective to activation list.
+    :param activations_beta: The vector of beta parameters for activation functions in order
+                             respective to activation list.
+    :param clip: The value defining clipping range [-clip, clip] on input of
+                 activation functions.
+    :param name: Optional output node name.
+    :return: The new node performing an RNNCell operation on tensor from input node.
+    """
+    if activations is None:
+        activations = ["tanh"]
+    if activations_alpha is None:
+        activations_alpha = []
+    if activations_beta is None:
+        activations_beta = []
+
+    input_nodes = as_nodes(X, initial_hidden_state, W, R, B, name=name)
+    attributes = {
+        "hidden_size": hidden_size,
+        "activations": activations,
+        "activations_alpha": activations_alpha,
+        "activations_beta": activations_beta,
+        "clip": clip,
+    }
+    return _get_node_factory_opset3().create("RNNCell", input_nodes, attributes)
+
+
+@nameable_op
+def roi_align(
+    data: NodeInput,
+    rois: NodeInput,
+    batch_indices: NodeInput,
+    pooled_h: int,
+    pooled_w: int,
+    sampling_ratio: int,
+    spatial_scale: float,
+    mode: str,
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which performs ROIAlign.
+
+    :param data: Input data.
+    :param rois: RoIs (Regions of Interest) to pool over.
+    :param batch_indices: Tensor with each element denoting the index of
+                          the corresponding image in the batch.
+    :param pooled_h: Height of the ROI output feature map.
+    :param pooled_w: Width of the ROI output feature map.
+    :param sampling_ratio: Number of bins over height and width to use to calculate
+                           each output feature map element.
+    :param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates.
+    :param mode: Method to perform pooling to produce output feature map elements.
+
+    :return: The new node which performs ROIAlign
+    """
+    inputs = as_nodes(data, rois, batch_indices, name=name)
+    attributes = {
+        "pooled_h": pooled_h,
+        "pooled_w": pooled_w,
+        "sampling_ratio": sampling_ratio,
+        "spatial_scale": spatial_scale,
+        "mode": mode,
+    }
+    return _get_node_factory_opset3().create("ROIAlign", inputs, attributes)
+
+
+@nameable_op
+def scatter_elements_update(
+    data: NodeInput,
+    indices: NodeInput,
+    updates: NodeInput,
+    axis: NodeInput,
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which produces a ScatterElementsUpdate operation.
+
+    :param data: The input tensor to be updated.
+    :param indices: The tensor with indexes which will be updated.
+    :param updates: The tensor with update values.
+    :param axis: The axis for scatter.
+    :return: ScatterElementsUpdate node
+
+    ScatterElementsUpdate creates a copy of the first input tensor with updated elements
+    specified with second and third input tensors.
+
+    For each entry in `updates`, the target index in `data` is obtained by combining
+    the corresponding entry in `indices` with the index of the entry itself: the
+    index-value for dimension equal to `axis` is obtained from the value of the
+    corresponding entry in `indices` and the index-value for dimension not equal
+    to `axis` is obtained from the index of the entry itself.
+
+    """
+    return _get_node_factory_opset3().create(
+        "ScatterElementsUpdate",
+        as_nodes(data, indices, updates, axis, name=name),
+    )
+
+
+@nameable_op
+def scatter_update(
+    data: Node,
+    indices: NodeInput,
+    updates: NodeInput,
+    axis: NodeInput,
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which produces a ScatterUpdate operation.
+
+    ScatterUpdate sets new values to slices from data addressed by indices.
+
+    :param data: The input tensor to be updated.
+    :param indices: The tensor with indexes which will be updated.
+    :param updates: The tensor with update values.
+    :param axis: The axis at which elements will be updated.
+    :return: ScatterUpdate node
+    """
+    return _get_node_factory_opset3().create(
+        "ScatterUpdate",
+        as_nodes(data, indices, updates, axis, name=name),
+    )
+
+
+@nameable_op
+def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = None) -> Node:
+    """Return a node which produces a tensor containing the shape of its input data.
+
+    :param data: The tensor containing the input data.
+    :param output_type: Output element type.
+    :return: ShapeOf node
+    """
+    return _get_node_factory_opset3().create(
+        "ShapeOf",
+        [as_node(data, name=name)],
+        {"output_type": output_type},
+    )
+
+
+@nameable_op
+def shuffle_channels(data: Node, axis: int, group: int, name: Optional[str] = None) -> Node:
+    """Perform permutation on data in the channel dimension of the input tensor.
+
+    :param data: The node with input tensor.
+    :param axis: Channel dimension index in the data tensor.
+                 A negative value means that the index should be calculated
+                 from the back of the input data shape.
+    :param group: The channel dimension specified by the axis parameter
+                  should be split into this number of groups.
+    :param name: Optional output node name.
+    :return: The new node performing a permutation on data in the channel dimension
+             of the input tensor.
+
+    The operation is equivalent to the following transformation of the input tensor
+    `data` of shape [N, C, H, W]:
+
+    `data_reshaped` = reshape(`data`, [N, group, C / group, H * W])
+
+    `data_transposed` = transpose(`data_reshaped`, [0, 2, 1, 3])
+
+    `output` = reshape(`data_transposed`, [N, C, H, W])
+
+    For example:
+
+    .. code-block:: python
+
+        Inputs: tensor of shape [1, 6, 2, 2]
+
+        data = [[[[ 0.,  1.], [ 2.,  3.]],
+                 [[ 4.,  5.], [ 6.,  7.]],
+                 [[ 8.,  9.], [10., 11.]],
+                 [[12., 13.], [14., 15.]],
+                 [[16., 17.], [18., 19.]],
+                 [[20., 21.], [22., 23.]]]]
+
+        axis = 1
+        groups = 3
+
+        Output: tensor of shape [1, 6, 2, 2]
+
+        output = [[[[ 0.,  1.], [ 2.,  3.]],
+                   [[ 8.,  9.], [10., 11.]],
+                   [[16., 17.], [18., 19.]],
+                   [[ 4.,  5.], [ 6.,  7.]],
+                   [[12., 13.], [14., 15.]],
+                   [[20., 21.], [22., 23.]]]]
+    """
+    return _get_node_factory_opset3().create(
+        "ShuffleChannels",
+        [as_node(data, name=name)],
+        {"axis": axis, "group": group},
+    )
+
+
+@nameable_op
+def topk(
+    data: NodeInput,
+    k: NodeInput,
+    axis: int,
+    mode: str,
+    sort: str,
+    index_element_type: str = "i32",
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which performs TopK.
+
+    :param data: Input data.
+    :param k: K.
+    :param axis: TopK Axis.
+    :param mode: Compute TopK largest ('max') or smallest ('min')
+    :param sort: Order of output elements (sort by: 'none', 'index' or 'value')
+    :param index_element_type: Type of output tensor with indices.
+    :return: The new node which performs TopK (both indices and values)
+    """
+    return _get_node_factory_opset3().create(
+        "TopK",
+        as_nodes(data, k, name=name),
+        {"axis": axis, "mode": mode, "sort": sort, "index_element_type": index_element_type},
+    )
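The factory functions in this file only build graph nodes; they are exercised through the regular OpenVINO model-building flow. The sketch below is a minimal illustration (not part of the package), assuming the bundled OpenVINO runtime is importable as openvino.runtime and the CPU plugin from lib/Release is usable on the host; it builds a tiny graph with shuffle_channels and shape_of matching the worked example in the shuffle_channels docstring above.

# Minimal usage sketch under the assumptions stated above.
import numpy as np
from openvino.runtime import Core, Model
from openvino.runtime import opset3 as ops

# parameter -> ShuffleChannels -> (also expose ShapeOf of the result)
data = ops.parameter([1, 6, 2, 2], dtype=np.float32, name="data")
shuffled = ops.shuffle_channels(data, axis=1, group=3)
shape = ops.shape_of(shuffled, output_type="i64")
model = Model([shuffled, shape], [data], "shuffle_example")

core = Core()
compiled = core.compile_model(model, "CPU")
result = compiled([np.arange(24, dtype=np.float32).reshape(1, 6, 2, 2)])
print(result[0])  # channels regrouped exactly as in the docstring example
print(result[1])  # [1 6 2 2]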