bigdl-core-npu 2.6.0b20250114__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (234)
  1. bigdl-core-npu/__init__.py +0 -0
  2. bigdl-core-npu/include/common.h +96 -0
  3. bigdl-core-npu/include/npu_llm.h +74 -0
  4. bigdl-core-npu/npu_llm.dll +0 -0
  5. bigdl-core-npu/npu_llm.lib +0 -0
  6. bigdl_core_npu-2.6.0b20250114.dist-info/METADATA +44 -0
  7. bigdl_core_npu-2.6.0b20250114.dist-info/RECORD +234 -0
  8. bigdl_core_npu-2.6.0b20250114.dist-info/WHEEL +5 -0
  9. bigdl_core_npu-2.6.0b20250114.dist-info/top_level.txt +2 -0
  10. intel_npu_acceleration_library/__init__.py +24 -0
  11. intel_npu_acceleration_library/_version.py +6 -0
  12. intel_npu_acceleration_library/backend/__init__.py +37 -0
  13. intel_npu_acceleration_library/backend/base.py +250 -0
  14. intel_npu_acceleration_library/backend/bindings.py +383 -0
  15. intel_npu_acceleration_library/backend/compression.py +24 -0
  16. intel_npu_acceleration_library/backend/convolution.py +58 -0
  17. intel_npu_acceleration_library/backend/factory.py +1161 -0
  18. intel_npu_acceleration_library/backend/linear.py +60 -0
  19. intel_npu_acceleration_library/backend/matmul.py +59 -0
  20. intel_npu_acceleration_library/backend/mlp.py +58 -0
  21. intel_npu_acceleration_library/backend/ops.py +142 -0
  22. intel_npu_acceleration_library/backend/qlinear.py +75 -0
  23. intel_npu_acceleration_library/backend/qmatmul.py +66 -0
  24. intel_npu_acceleration_library/backend/runtime.py +215 -0
  25. intel_npu_acceleration_library/backend/sdpa.py +107 -0
  26. intel_npu_acceleration_library/backend/tensor.py +1120 -0
  27. intel_npu_acceleration_library/backend/utils.py +70 -0
  28. intel_npu_acceleration_library/compiler.py +194 -0
  29. intel_npu_acceleration_library/device.py +230 -0
  30. intel_npu_acceleration_library/dtypes.py +155 -0
  31. intel_npu_acceleration_library/external/openvino/__init__.py +72 -0
  32. intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +21 -0
  33. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
  34. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
  35. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
  36. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
  37. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
  38. intel_npu_acceleration_library/external/openvino/experimental/__init__.py +14 -0
  39. intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
  40. intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
  41. intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
  42. intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +293 -0
  43. intel_npu_acceleration_library/external/openvino/frontend/jax/passes.py +65 -0
  44. intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +182 -0
  45. intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
  46. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
  47. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
  48. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
  49. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
  50. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
  51. intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
  52. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
  53. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
  54. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
  55. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
  56. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
  57. intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
  58. intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +370 -0
  59. intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +180 -0
  60. intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
  61. intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +118 -0
  62. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
  63. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
  64. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
  65. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
  66. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
  67. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +131 -0
  68. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
  69. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
  70. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
  71. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
  72. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +290 -0
  73. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +126 -0
  74. intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +568 -0
  75. intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +258 -0
  76. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
  77. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
  78. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
  79. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
  80. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
  81. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
  82. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
  83. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
  84. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +481 -0
  85. intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
  86. intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
  87. intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
  88. intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +28 -0
  89. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
  90. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
  91. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +5 -0
  92. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
  93. intel_npu_acceleration_library/external/openvino/properties/__init__.py +22 -0
  94. intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
  95. intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
  96. intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
  97. intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
  98. intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
  99. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
  100. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
  101. intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
  102. intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
  103. intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
  104. intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
  105. intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
  106. intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +19 -0
  107. intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
  108. intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
  109. intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3068 -0
  110. intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
  111. intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
  112. intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
  113. intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
  114. intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
  115. intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
  116. intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
  117. intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +398 -0
  118. intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
  119. intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
  120. intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +17 -0
  121. intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +276 -0
  122. intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
  123. intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
  124. intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
  125. intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
  126. intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
  127. intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
  128. intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
  129. intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
  130. intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
  131. intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +215 -0
  132. intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
  133. intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
  134. intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
  135. intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +787 -0
  136. intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
  137. intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
  138. intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
  139. intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
  140. intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
  141. intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
  142. intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +40 -0
  143. intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
  144. intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
  145. intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
  146. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
  147. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +447 -0
  148. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
  149. intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +156 -0
  150. intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
  151. intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
  152. intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
  153. intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
  154. intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
  155. intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
  156. intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
  157. intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
  158. intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
  159. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
  160. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
  161. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
  162. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
  163. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
  164. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
  165. intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
  166. intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
  167. intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
  168. intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
  169. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
  170. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +550 -0
  171. intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
  172. intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
  173. intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
  174. intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
  175. intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
  176. intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +40 -0
  177. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
  178. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
  179. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
  180. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
  181. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
  182. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
  183. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
  184. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
  185. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
  186. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +298 -0
  187. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
  188. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +214 -0
  189. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
  190. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
  191. intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
  192. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
  193. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
  194. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
  195. intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +196 -0
  196. intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
  197. intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
  198. intel_npu_acceleration_library/external/openvino/utils.py +115 -0
  199. intel_npu_acceleration_library/functional/__init__.py +8 -0
  200. intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
  201. intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
  202. intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
  203. intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
  204. intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
  205. intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
  206. intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
  207. intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
  208. intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
  209. intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
  210. intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
  211. intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
  212. intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
  213. intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
  214. intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
  215. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
  216. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
  217. intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
  218. intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
  219. intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
  220. intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
  221. intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
  222. intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
  223. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
  224. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
  225. intel_npu_acceleration_library/modelling.py +150 -0
  226. intel_npu_acceleration_library/nn/__init__.py +20 -0
  227. intel_npu_acceleration_library/nn/autograd.py +68 -0
  228. intel_npu_acceleration_library/nn/conv.py +257 -0
  229. intel_npu_acceleration_library/nn/functional.py +1207 -0
  230. intel_npu_acceleration_library/nn/linear.py +162 -0
  231. intel_npu_acceleration_library/nn/llm.py +417 -0
  232. intel_npu_acceleration_library/nn/module.py +393 -0
  233. intel_npu_acceleration_library/optimizations.py +157 -0
  234. intel_npu_acceleration_library/quantization.py +174 -0
@@ -0,0 +1,3068 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (C) 2018-2024 Intel Corporation
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ """Factory functions for all openvino ops."""
6
+ from typing import List, Optional, Union, get_args
7
+
8
+ import numpy as np
9
+ from functools import partial
10
+
11
+ from openvino.runtime import Node, PartialShape, Type
12
+ from openvino.runtime.op import Constant, Parameter, tensor_iterator
13
+ from openvino.runtime.opset_utils import _get_node_factory
14
+ from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op
15
+ from openvino.runtime.utils.input_validation import (
16
+ check_valid_attributes,
17
+ is_non_negative_value,
18
+ is_positive_value,
19
+ )
20
+ from openvino.runtime.utils.node_factory import NodeFactory
21
+ from openvino.runtime.utils.types import (
22
+ NodeInput,
23
+ NumericData,
24
+ NumericType,
25
+ ScalarData,
26
+ TensorShape,
27
+ as_node,
28
+ as_nodes,
29
+ get_dtype,
30
+ get_element_type,
31
+ get_element_type_str,
32
+ make_constant_node,
33
+ )
34
+ from openvino.utils import deprecated
35
+
36
+ _get_node_factory_opset1 = partial(_get_node_factory, "opset1")
37
+
38
+ # -------------------------------------------- ops ------------------------------------------------
39
+
40
+
41
+ @unary_op
42
+ def absolute(node: NodeInput, name: Optional[str] = None) -> Node:
43
+ """Return node which applies f(x) = abs(x) to the input node element-wise.
44
+
45
+ :param node: One of: input node, array or scalar.
46
+ :param name: Optional new name for output node.
47
+ :return: New node with Abs operation applied on it.
48
+ """
49
+ return _get_node_factory_opset1().create("Abs", [node])
50
+
51
+
52
@unary_op
def acos(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply the element-wise inverse cosine (arccos) to the input node.

    :param node: Input node, array or scalar.
    :param name: Optional new name for the output node.
    :return: New node performing the Acos operation.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Acos", [node])
61
+
62
+
63
@binary_op
def add(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Create a node computing element-wise addition f(A, B) = A + B.

    :param left_node: The first addend node.
    :param right_node: The second addend node.
    :param auto_broadcast: Name of the broadcasting rule applied to the input
                           tensors; uppercased before use. Defaults to "NUMPY".
    :param name: Optional name for the new output node.
    :return: Node performing the element-wise addition.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    factory = _get_node_factory_opset1()
    return factory.create("Add", [left_node, right_node], attributes)
84
+
85
+
86
@unary_op
def asin(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply the element-wise inverse sine (arcsin) to the input node.

    :param node: Input node, array or scalar.
    :param name: Optional new name for the output node.
    :return: New node performing the Asin operation.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Asin", [node])
95
+
96
+
97
@unary_op
def atan(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply the element-wise inverse tangent (arctan) to the input node.

    :param node: Input node, array or scalar.
    :param name: Optional new name for the output node.
    :return: New node performing the Atan operation.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Atan", [node])
106
+
107
+
108
@nameable_op
def avg_pool(
    data_batch: NodeInput,
    strides: List[int],
    pads_begin: TensorShape,
    pads_end: TensorShape,
    kernel_shape: TensorShape,
    exclude_pad: bool,
    rounding_type: str = "floor",
    auto_pad: Optional[str] = None,
    name: Optional[str] = None,
) -> Node:
    """Create an AvgPool node performing average pooling on the input data.

    :param data_batch: The node providing the input data.
    :param strides: The window movement strides.
    :param pads_begin: Optional zero padding added before the data along each axis.
    :param pads_end: Optional zero padding added after the data along each axis.
    :param kernel_shape: The pooling window shape.
    :param exclude_pad: Whether zero padding is excluded from the average computation.
    :param rounding_type: Rounding schema used when computing the output shape;
                          acceptable values: ['floor', 'ceil'] (uppercased before use).
    :param auto_pad: How padding is calculated; acceptable values:
                     [None, 'same_upper', 'same_lower', 'valid'].
    :param name: Optional name for the new output node.
    :return: New node with the AvgPool operation applied to its data.
    """
    # None means "no automatic padding": the explicit pads_* values are used.
    effective_auto_pad = "explicit" if auto_pad is None else auto_pad
    attributes = {
        "strides": strides,
        "pads_begin": pads_begin,
        "pads_end": pads_end,
        "kernel": kernel_shape,
        "exclude-pad": exclude_pad,
        "rounding_type": rounding_type.upper(),
        "auto_pad": effective_auto_pad.upper(),
    }
    factory = _get_node_factory_opset1()
    return factory.create("AvgPool", [as_node(data_batch, name=name)], attributes)
151
+
152
+
153
@nameable_op
def batch_norm_inference(
    data: NodeInput,
    gamma: NodeInput,
    beta: NodeInput,
    mean: NodeInput,
    variance: NodeInput,
    epsilon: float,
    name: Optional[str] = None,
) -> Node:
    """Normalize the input tensor by mean and variance, applying scale and offset.

    :param data: The input tensor with data to normalize.
    :param gamma: The scaling applied to the normalized value.
    :param beta: The bias added to the scaled normalized value.
    :param mean: The value used for mean normalization.
    :param variance: The value used for variance normalization.
    :param epsilon: Value added to the variance to avoid division by zero
                    when normalizing.
    :param name: Optional name for the output node.
    :return: New node performing BatchNormInference.
    """
    # NOTE(review): inputs are passed in gamma, beta, data, mean, variance
    # order — presumably the order the opset1 factory expects; do not reorder.
    node_inputs = as_nodes(gamma, beta, data, mean, variance, name=name)
    factory = _get_node_factory_opset1()
    return factory.create("BatchNormInference", node_inputs, {"epsilon": epsilon})
177
+
178
+
179
@nameable_op
def binary_convolution(
    data: NodeInput,
    filters: NodeInput,
    strides: List[int],
    pads_begin: List[int],
    pads_end: List[int],
    dilations: List[int],
    mode: str,
    pad_value: float,
    auto_pad: str = "EXPLICIT",
    name: Optional[str] = None,
) -> Node:
    """Create a convolution node with binary weights, binary input and integer output.

    :param data: The node providing the data batch tensor.
    :param filters: The node providing the filters tensor.
    :param strides: The kernel window movement strides.
    :param pads_begin: The number of pixels to add at the beginning along each axis.
    :param pads_end: The number of pixels to add at the end along each axis.
    :param dilations: The distance in width and height between filter elements.
    :param mode: Defines how input tensor 0/1 values and weights 0/1 are interpreted.
    :param pad_value: Floating-point value used to fill the padded area.
    :param auto_pad: The type of padding. Range of values: explicit, same_upper,
                     same_lower, valid.
    :param name: Optional new name for the output node.
    :return: New node performing the binary convolution operation.
    """
    attributes = {
        "strides": strides,
        "pads_begin": pads_begin,
        "pads_end": pads_end,
        "dilations": dilations,
        "mode": mode,
        "pad_value": pad_value,
        "auto_pad": auto_pad,
    }
    factory = _get_node_factory_opset1()
    return factory.create("BinaryConvolution", as_nodes(data, filters, name=name), attributes)
219
+
220
+
221
@nameable_op
def broadcast(
    data: NodeInput,
    target_shape: NodeInput,
    axes_mapping: Optional[NodeInput] = None,
    mode: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Broadcast the input node's values along specified axes to a desired shape.

    :param data: The node with the input tensor data.
    :param target_shape: The node holding the shape to broadcast the tensor to.
    :param axes_mapping: The node with axis positions (0-based) in the result
                         that are being broadcast; only consumed in EXPLICIT mode.
    :param mode: The type of broadcasting that specifies the mapping of input
                 tensor axes to output shape axes. Range of values: NUMPY, EXPLICIT.
    :param name: Optional new name for the output node.
    :return: New node with the broadcast shape.
    """
    broadcast_mode = mode.upper()
    node_inputs = as_nodes(data, target_shape, name=name)
    if broadcast_mode == "EXPLICIT":
        # NOTE(review): EXPLICIT mode requires axes_mapping to be supplied by
        # the caller — a None here is passed straight into as_node; confirm
        # upstream validation covers that case.
        node_inputs.append(as_node(axes_mapping, name=name))
    factory = _get_node_factory_opset1()
    return factory.create("Broadcast", node_inputs, {"mode": broadcast_mode})
248
+
249
+
250
@nameable_op
def ctc_greedy_decoder(
    data: NodeInput,
    sequence_mask: NodeInput,
    merge_repeated: bool = True,
    name: Optional[str] = None,
) -> Node:
    """Perform greedy (best path) decoding on the logits given in input.

    :param data: Logits on which greedy decoding is performed.
    :param sequence_mask: The tensor with sequence masks for each sequence
                          in the batch.
    :param merge_repeated: Flag for merging repeated labels during the CTC
                           calculation.
    :param name: Optional name for the output node.
    :return: New node performing the CTCGreedyDecoder operation.
    """
    factory = _get_node_factory_opset1()
    return factory.create(
        "CTCGreedyDecoder",
        as_nodes(data, sequence_mask, name=name),
        {"ctc_merge_repeated": merge_repeated},
    )
271
+
272
+
273
@unary_op
def ceiling(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply the ceiling function to the input node element-wise.

    :param node: The node providing data to the ceiling operation.
    :param name: Optional name for the output node.
    :return: New node performing element-wise ceiling.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Ceiling", [node])
282
+
283
+
284
@nameable_op
def clamp(
    data: NodeInput,
    min_value: ScalarData,
    max_value: ScalarData,
    name: Optional[str] = None,
) -> Node:
    """Clip each element of the input to the <min_value; max_value> range.

    For each element of `data`: values below `min_value` are replaced with
    `min_value`, values above `max_value` are replaced with `max_value`, and
    values in between are returned unchanged. Equivalent to:

    .. code-block:: python

        if data < min_value:
            data = min_value
        elif data > max_value:
            data = max_value

    :param data: Input tensor. One of: input node, array or scalar.
    :param min_value: The lower bound of the range. Scalar value.
    :param max_value: The upper bound of the range. Scalar value.
    :param name: Optional output node name.
    :return: New node performing the element-wise clamp operation.
    """
    attributes = {"min": min_value, "max": max_value}
    factory = _get_node_factory_opset1()
    return factory.create("Clamp", [as_node(data, name=name)], attributes)
320
+
321
+
322
@nameable_op
def concat(nodes: List[NodeInput], axis: int, name: Optional[str] = None) -> Node:
    """Concatenate input nodes into a single new node along the specified axis.

    :param nodes: The nodes we want concatenate into single new node.
    :param axis: The axis along which we want to concatenate input nodes.
    :param name: The optional new name for output node.
    :return: Return new node that is a concatenation of input nodes.
    """
    concat_inputs = as_nodes(*nodes, name=name)
    return _get_node_factory_opset1().create("Concat", concat_inputs, {"axis": axis})
332
+
333
+
334
@nameable_op
def constant(
    value: NumericData,
    dtype: Union[NumericType, Type] = None,
    name: Optional[str] = None,
) -> Constant:
    """Create a Constant node from provided value.

    :param value: One of: array of values or scalar to initialize node with.
    :param dtype: The data type of provided data.
    :param name: Optional name for output node.
    :return: The Constant node initialized with provided data.
    :raises ValueError: If value is None or an empty array.
    """
    # Reject inputs that carry no data at all.
    is_empty_array = isinstance(value, np.ndarray) and value.size == 0
    if value is None or is_empty_array:
        raise ValueError("Cannot create an empty Constant. Please provide valid data.")
    return make_constant_node(value, dtype)
350
+
351
+
352
@nameable_op
def convert(
    data: NodeInput,
    destination_type: Union[str, NumericType, Type],
    name: Optional[str] = None,
) -> Node:
    """Return node which casts input node values to specified type.

    :param data: Node which produces the input tensor.
    :param destination_type: Provides the target type for the conversion.
    :param name: Optional name for the output node.
    :return: New node performing the conversion operation.
    """
    # Numeric (e.g. numpy) dtypes are translated to their lowercase element-type
    # string; strings and Type objects are forwarded untouched.
    if isinstance(destination_type, get_args(NumericType)):
        target_type = get_element_type_str(destination_type).lower()
    else:
        target_type = destination_type
    return _get_node_factory_opset1().create(
        "Convert", [as_node(data, name=name)], {"destination_type": target_type}
    )
375
+
376
+
377
@binary_op
def convert_like(data: NodeInput, like: NodeInput, name: Optional[str] = None) -> Node:
    """Return node which casts data node values to the type of another node.

    :param data: Node which produces the input tensor.
    :param like: Node which provides the target type information for the conversion.
    :param name: Optional name for the output node.
    :return: New node performing the conversion operation.
    """
    factory = _get_node_factory_opset1()
    return factory.create("ConvertLike", [data, like])
387
+
388
+
389
@nameable_op
def convolution(
    data: NodeInput,
    filters: NodeInput,
    strides: List[int],
    pads_begin: List[int],
    pads_end: List[int],
    dilations: List[int],
    auto_pad: str = "EXPLICIT",
    name: Optional[str] = None,
) -> Node:
    """Return node performing batched convolution operation.

    :param data: The node providing data batch tensor.
    :param filters: The node providing filters tensor.
    :param strides: The kernel window movement strides.
    :param pads_begin: The number of zero padding elements to add on each axis below 0 coordinate.
    :param pads_end: The number of zero padding elements to add on each axis above max coordinate.
    :param dilations: The data batch dilation strides.
    :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
    :param name: The optional new name for output node.
    :return: New node performing batched convolution operation.
    """
    conv_attrs = {
        "strides": strides,
        "pads_begin": pads_begin,
        "pads_end": pads_end,
        "dilations": dilations,
        "auto_pad": auto_pad,
    }
    return _get_node_factory_opset1().create(
        "Convolution", as_nodes(data, filters, name=name), conv_attrs
    )
423
+
424
+
425
@nameable_op
def convolution_backprop_data(
    data: NodeInput,
    filters: NodeInput,
    strides: List[int],
    output_shape: Optional[NodeInput] = None,
    pads_begin: Optional[List[int]] = None,
    pads_end: Optional[List[int]] = None,
    dilations: Optional[List[int]] = None,
    auto_pad: Optional[str] = None,
    output_padding: Optional[List[int]] = None,
    name: Optional[str] = None,
) -> Node:
    """Create node performing a batched-convolution backprop data operation.

    :param data: The node producing data from forward-prop.
    :param filters: The node producing the filters from forward-prop.
    :param strides: The distance (in pixels) to slide the filter on the feature map
                    over the axes.
    :param output_shape: The node producing output delta.
    :param pads_begin: The number of pixels to add to the beginning along each axis.
    :param pads_end: The number of pixels to add to the end along each axis.
    :param dilations: The distance in width and height between elements (weights)
                      in the filter.
    :param auto_pad: The type of padding; defaults to "explicit" when not given.
    :param output_padding: The additional amount of paddings added per each spatial axis
                           in the output tensor.
    :param name: The node name.
    :return: The node object representing ConvolutionBackpropData operation.
    """
    num_spatial_dims = len(strides)
    # Fill in per-spatial-axis defaults: zero padding, unit dilation.
    pads_begin = [0] * num_spatial_dims if pads_begin is None else pads_begin
    pads_end = [0] * num_spatial_dims if pads_end is None else pads_end
    dilations = [1] * num_spatial_dims if dilations is None else dilations
    output_padding = [0] * num_spatial_dims if output_padding is None else output_padding
    auto_pad = "explicit" if auto_pad is None else auto_pad

    op_inputs = as_nodes(data, filters, name=name)
    if output_shape is not None:
        op_inputs.append(as_node(output_shape, name=name))

    op_attrs = {
        "strides": strides,
        "pads_begin": pads_begin,
        "pads_end": pads_end,
        "dilations": dilations,
        "auto_pad": auto_pad.upper(),
        "output_padding": output_padding,
    }
    return _get_node_factory_opset1().create("ConvolutionBackpropData", op_inputs, op_attrs)
480
+
481
+
482
@unary_op
def cos(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply cosine function on the input node element-wise.

    :param node: One of: input node, array or scalar.
    :param name: Optional new name for output node.
    :return: New node with cos operation applied on it.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Cos", [node])
491
+
492
+
493
@unary_op
def cosh(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply hyperbolic cosine function on the input node element-wise.

    :param node: One of: input node, array or scalar.
    :param name: Optional new name for output node.
    :return: New node with cosh operation applied on it.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Cosh", [node])
502
+
503
+
504
@nameable_op
def deformable_convolution(
    data: NodeInput,
    deformable_values: NodeInput,
    filters: NodeInput,
    strides: List[int],
    pads_begin: List[int],
    pads_end: List[int],
    dilations: List[int],
    auto_pad: str = "EXPLICIT",
    group: int = 1,
    deformable_group: int = 1,
    name: Optional[str] = None,
) -> Node:
    """Create node performing deformable convolution.

    :param data: The node providing data batch tensor.
    :param deformable_values: The node providing deformation (offset) values.
    :param filters: The node providing filters tensor.
    :param strides: The distance (in pixels) to slide the filter on the feature map over the axes.
    :param pads_begin: The number of pixels to add to the beginning along each axis.
    :param pads_end: The number of pixels to add to the end along each axis.
    :param dilations: The distance in width and height between elements (weights) in the filter.
    :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
    :param group: The number of groups which both output and input should be split into.
    :param deformable_group: The number of groups which deformable values and output should be split
                             into along the channel axis.
    :param name: The optional new name for output node.
    :return: New node performing deformable convolution operation.
    """
    deform_conv_attrs = {
        "strides": strides,
        "pads_begin": pads_begin,
        "pads_end": pads_end,
        "dilations": dilations,
        "auto_pad": auto_pad,
        "group": group,
        "deformable_group": deformable_group,
    }
    deform_conv_inputs = as_nodes(data, deformable_values, filters, name=name)
    return _get_node_factory_opset1().create(
        "DeformableConvolution", deform_conv_inputs, deform_conv_attrs
    )
546
+
547
+
548
@nameable_op
def deformable_psroi_pooling(
    feature_maps: NodeInput,
    coords: NodeInput,
    output_dim: int,
    spatial_scale: float,
    group_size: int = 1,
    mode: str = "bilinear_deformable",
    spatial_bins_x: int = 1,
    spatial_bins_y: int = 1,
    trans_std: float = 1.0,
    part_size: int = 1,
    offsets: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Return node performing DeformablePSROIPooling operation.

    DeformablePSROIPooling computes position-sensitive pooling
    on regions of interest specified by input.

    :param feature_maps: 4D tensor with feature maps.
    :param coords: 2D tensor describing box consisting of tuples: [batch_id, x_1, y_1, x_2, y_2].
    :param output_dim: A pooled output channel number.
    :param spatial_scale: A multiplicative spatial scale factor to translate ROI.
    :param group_size: The number of groups to encode position-sensitive score.
    :param mode: Specifies mode for pooling. Range of values: ['bilinear_deformable'].
    :param spatial_bins_x: Specifies numbers of bins to divide the input feature maps over width.
    :param spatial_bins_y: Specifies numbers of bins to divide the input feature maps over height.
    :param trans_std: The value that all transformation (offset) values are multiplied with.
    :param part_size: The number of parts the output tensor spatial dimensions are divided into.
    :param offsets: Optional node. 4D input blob with transformation values (offsets).
    :param name: The optional new name for output node.
    :return: New node performing DeformablePSROIPooling operation.
    """
    pooling_attrs = {
        "output_dim": output_dim,
        "spatial_scale": spatial_scale,
        "group_size": group_size,
        "mode": mode,
        "spatial_bins_x": spatial_bins_x,
        "spatial_bins_y": spatial_bins_y,
        "trans_std": trans_std,
        "part_size": part_size,
    }
    pooling_inputs = as_nodes(feature_maps, coords, name=name)
    # The offsets input is optional; append it only when supplied.
    if offsets is not None:
        pooling_inputs.append(as_node(offsets, name=name))

    return _get_node_factory_opset1().create(
        "DeformablePSROIPooling", pooling_inputs, pooling_attrs
    )
600
+
601
+
602
@nameable_op
def depth_to_space(node: Node, mode: str, block_size: int = 1, name: Optional[str] = None) -> Node:
    """Rearrange input tensor from depth into blocks of spatial data.

    Values from the depth (channel) dimension are moved into blocks of the
    height and width dimensions (the inverse of SpaceToDepth).

    Input tensor has shape [N,C,H,W], where N is the batch axis, C is the channel or depth,
    H is the height and W is the width.

    Output node produces a tensor with shape:

    [N, C / (`block_size` * `block_size`), H * `block_size`, W * `block_size`]

    (see the OpenVINO DepthToSpace-1 operation specification).

    :param node: The node with input tensor data.
    :param mode: Specifies how the input depth dimension is split to block coordinates

                 blocks_first: The input is divided to [block_size, ..., block_size, new_depth]
                 depth_first: The input is divided to [new_depth, block_size, ..., block_size]

    :param block_size: The size of the spatial block of values describing
                       how the tensor's data is to be rearranged.
    :param name: Optional output node name.
    :return: The new node performing an DepthToSpace operation on its input tensor.
    """
    return _get_node_factory_opset1().create(
        "DepthToSpace",
        [node],
        {"mode": mode, "block_size": block_size},
    )
631
+
632
+
633
@nameable_op
def detection_output(
    box_logits: Node,
    class_preds: Node,
    proposals: Node,
    attrs: dict,
    aux_class_preds: NodeInput = None,
    aux_box_preds: NodeInput = None,
    name: Optional[str] = None,
) -> Node:
    """Generate the detection output using information on location and confidence predictions.

    :param box_logits: The 2D input tensor with box logits.
    :param class_preds: The 2D input tensor with class predictions.
    :param proposals: The 3D input tensor with proposals.
    :param attrs: The dictionary containing key, value pairs for attributes.
    :param aux_class_preds: The 2D input tensor with additional class predictions information.
    :param aux_box_preds: The 2D input tensor with additional box predictions information.
    :param name: Optional name for the output node.
    :return: Node representing DetectionOutput operation.

    Available attributes are:

    * num_classes The number of classes to be predicted.
                  Range of values: positive integer number
                  Default value: None
                  Required: yes

    * background_label_id The background label id.
                          Range of values: integer value
                          Default value: 0
                          Required: no

    * top_k Maximum number of results to be kept per batch after NMS step.
            Range of values: integer value
            Default value: -1
            Required: no

    * variance_encoded_in_target The flag that denotes if variance is encoded in target.
                                 Range of values: {False, True}
                                 Default value: False
                                 Required: no

    * keep_top_k Maximum number of bounding boxes per batch to be kept after NMS step.
                 Range of values: integer values
                 Default value: None
                 Required: yes

    * code_type The type of coding method for bounding boxes.
                Range of values: {'caffe.PriorBoxParameter.CENTER_SIZE',
                                  'caffe.PriorBoxParameter.CORNER'}

                Default value: 'caffe.PriorBoxParameter.CORNER'
                Required: no

    * share_location The flag that denotes if bounding boxes are shared among different
                     classes.
                     Range of values: {True, False}
                     Default value: True
                     Required: no

    * nms_threshold The threshold to be used in the NMS stage.
                    Range of values: floating point value
                    Default value: None
                    Required: yes

    * confidence_threshold Specifies the minimum confidence threshold for detection boxes to be
                           considered.
                           Range of values: floating point value
                           Default value: 0
                           Required: no

    * clip_after_nms The flag that denotes whether to perform clip bounding boxes after
                     non-maximum suppression or not.
                     Range of values: {True, False}
                     Default value: False
                     Required: no

    * clip_before_nms The flag that denotes whether to perform clip bounding boxes before
                      non-maximum suppression or not.
                      Range of values: {True, False}
                      Default value: False
                      Required: no

    * decrease_label_id The flag that denotes how to perform NMS.
                        Range of values: False - perform NMS like in Caffe*.
                                         True - perform NMS like in MxNet*.

                        Default value: False
                        Required: no

    * normalized The flag that denotes whether input tensors with boxes are normalized.
                 Range of values: {True, False}
                 Default value: False
                 Required: no

    * input_height The input image height.
                   Range of values: positive integer number
                   Default value: 1
                   Required: no

    * input_width The input image width.
                  Range of values: positive integer number
                  Default value: 1
                  Required: no

    * objectness_score The threshold to sort out confidence predictions.
                       Range of values: non-negative float number
                       Default value: 0
                       Required: no

    Example of attribute dictionary:
    .. code-block:: python

        # just required ones
        attrs = {
            'num_classes': 85,
            'keep_top_k': [1, 2, 3],
            'nms_threshold': 0.645,

        }

        attrs = {
            'num_classes': 85,
            'keep_top_k': [1, 2, 3],
            'nms_threshold': 0.645,
            'normalized': True,
            'clip_before_nms': True,
            'input_height': [32],
            'input_width': [32],

        }

    Optional attributes which are absent from dictionary will be set with corresponding default.
    """
    # Each entry: (attribute name, required?, expected numpy dtype family, validator).
    requirements = [
        ("num_classes", True, np.integer, is_positive_value),
        ("background_label_id", False, np.integer, None),
        ("top_k", False, np.integer, None),
        ("variance_encoded_in_target", False, np.bool_, None),
        ("keep_top_k", True, np.integer, None),
        ("code_type", False, np.str_, None),
        ("share_location", False, np.bool_, None),
        ("nms_threshold", True, np.floating, None),
        ("confidence_threshold", False, np.floating, None),
        ("clip_after_nms", False, np.bool_, None),
        ("clip_before_nms", False, np.bool_, None),
        ("decrease_label_id", False, np.bool_, None),
        ("normalized", False, np.bool_, None),
        ("input_height", False, np.integer, is_positive_value),
        ("input_width", False, np.integer, is_positive_value),
        ("objectness_score", False, np.floating, is_non_negative_value),
    ]

    # Raises if a required attribute is missing or any value fails validation.
    check_valid_attributes("DetectionOutput", attrs, requirements)

    inputs = [box_logits, class_preds, proposals]
    # The two auxiliary inputs are optional and only appended when provided.
    if aux_class_preds is not None:
        inputs.append(aux_class_preds)
    if aux_box_preds is not None:
        inputs.append(aux_box_preds)

    return _get_node_factory_opset1().create("DetectionOutput", inputs, attrs)
796
+
797
+
798
@binary_op
def divide(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Return node which applies f(x) = A/B to the input nodes element-wise.

    :param left_node: The node providing dividend data.
    :param right_node: The node providing divisor data.
    :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors.
    :param name: Optional name for output node.
    :return: The node performing element-wise division.
    """
    broadcast_attrs = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("Divide", [left_node, right_node], broadcast_attrs)
818
+
819
+
820
@nameable_op
def elu(data: NodeInput, alpha: NumericType, name: Optional[str] = None) -> Node:
    """Perform Exponential Linear Unit operation element-wise on data from input node.

    Computes exponential linear: alpha * (exp(data) - 1) if < 0, data otherwise.

    For more information refer to:
    [Fast and Accurate Deep Network Learning by Exponential Linear Units](http://arxiv.org/abs/1511.07289)

    :param data: Input tensor. One of: input node, array or scalar.
    :param alpha: Scalar multiplier for negative values.
    :param name: Optional output node name.
    :return: The new node performing an ELU operation on its input data element-wise.
    """
    input_node = as_node(data, name=name)
    return _get_node_factory_opset1().create("Elu", [input_node], {"alpha": alpha})
835
+
836
+
837
@binary_op
def equal(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Return node which checks if input nodes are equal element-wise.

    :param left_node: The first input node for equal operation.
    :param right_node: The second input node for equal operation.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors.
    :param name: The optional name for output new node.
    :return: The node performing element-wise equality check.
    """
    broadcast_attrs = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("Equal", [left_node, right_node], broadcast_attrs)
858
+
859
+
860
@unary_op
def erf(node: NodeInput, name: Optional[str] = None) -> Node:
    """Return node which calculates Gauss error function element-wise with given tensor.

    :param node: The node providing data for operation.
    :param name: The optional name for new output node.
    :return: The new node performing element-wise Erf operation.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Erf", [node])
869
+
870
+
871
@unary_op
def exp(node: NodeInput, name: Optional[str] = None) -> Node:
    """Return node which applies exponential function to the input node element-wise.

    :param node: The node providing data for operation.
    :param name: The optional name for new output node.
    :return: The new node performing natural exponential operation.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Exp", [node])
880
+
881
+
882
@nameable_op
def fake_quantize(
    data: NodeInput,
    input_low: NodeInput,
    input_high: NodeInput,
    output_low: NodeInput,
    output_high: NodeInput,
    levels: int,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    r"""Perform an element-wise linear quantization on input data.

    :param data: The node with data tensor.
    :param input_low: The node with the minimum for input values.
    :param input_high: The node with the maximum for input values.
    :param output_low: The node with the minimum quantized value.
    :param output_high: The node with the maximum quantized value.
    :param levels: The number of quantization levels. Integer value.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors.
    :param name: Optional name for the output node.
    :return: New node with quantized value.

    Input floating point values are quantized into a discrete set of floating point values.

    .. code-block:: python

        if x <= input_low:
            output = output_low
        elif x > input_high:
            output = output_high
        else:
            output = fake_quantize(output)

    Fake quantize uses the following logic:

    \f[ output =
            \dfrac{round( \dfrac{data - input\_low}{(input\_high - input\_low)\cdot (levels-1)})}
            {(levels-1)\cdot (output\_high - output\_low)} + output\_low \f]
    """
    fq_attrs = {"levels": levels, "auto_broadcast": auto_broadcast.upper()}
    fq_inputs = as_nodes(data, input_low, input_high, output_low, output_high, name=name)
    return _get_node_factory_opset1().create("FakeQuantize", fq_inputs, fq_attrs)
927
+
928
+
929
@unary_op
def floor(node: NodeInput, name: Optional[str] = None) -> Node:
    """Return node which applies floor to the input node element-wise.

    :param node: The input node providing data.
    :param name: The optional name for new output node.
    :return: The node performing element-wise floor operation.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Floor", [node])
938
+
939
+
940
@binary_op
def floor_mod(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Return node performing element-wise FloorMod (division reminder) with two given tensors.

    :param left_node: The first input node for FloorMod operation.
    :param right_node: The second input node for FloorMod operation.
    :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors.
    :param name: Optional name for output node.
    :return: The node performing element-wise FloorMod operation.
    """
    broadcast_attrs = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("FloorMod", [left_node, right_node], broadcast_attrs)
960
+
961
+
962
@nameable_op
def gather(
    data: NodeInput,
    indices: NodeInput,
    axis: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Return Gather node which takes slices from axis of data according to indices.

    :param data: The tensor from which slices are gathered.
    :param indices: Tensor with indexes to gather.
    :param axis: The dimension index to gather data from.
    :param name: Optional name for output node.
    :return: The new node performing a Gather operation on the data input tensor.
    """
    return _get_node_factory_opset1().create("Gather", as_nodes(data, indices, axis, name=name))
979
+
980
+
981
@nameable_op
def gather_tree(
    step_ids: NodeInput,
    parent_idx: NodeInput,
    max_seq_len: NodeInput,
    end_token: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Perform GatherTree operation.

    :param step_ids: The tensor with indices from per each step.
    :param parent_idx: The tensor with with parent beam indices.
    :param max_seq_len: The tensor with maximum lengths for each sequence in the batch.
    :param end_token: The scalar tensor with value of the end marker in a sequence.
    :param name: Optional name for output node.
    :return: The new node performing a GatherTree operation.

    The GatherTree node generates the complete beams from the indices per each step
    and the parent beam indices.
    GatherTree uses the following logic:

    .. code-block:: python

        for batch in range(BATCH_SIZE):
            for beam in range(BEAM_WIDTH):
                max_sequence_in_beam = min(MAX_TIME, max_seq_len[batch])

                parent = parent_idx[max_sequence_in_beam - 1, batch, beam]

                for level in reversed(range(max_sequence_in_beam - 1)):
                    final_idx[level, batch, beam] = step_idx[level, batch, parent]

                    parent = parent_idx[level, batch, parent]
    """
    tree_inputs = as_nodes(step_ids, parent_idx, max_seq_len, end_token, name=name)
    factory = _get_node_factory_opset1()
    return factory.create("GatherTree", tree_inputs)
1017
+
1018
+
1019
@binary_op
def greater(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Return node which checks if left input node is greater than the right node element-wise.

    :param left_node: The first input node providing data.
    :param right_node: The second input node providing data.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors.
    :param name: The optional new name for output node.
    :return: The node performing element-wise check whether left_node is greater than right_node.
    """
    broadcast_attrs = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("Greater", [left_node, right_node], broadcast_attrs)
1040
+
1041
+
1042
@binary_op
def greater_equal(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Return node which checks if left node is greater or equal to the right node element-wise.

    :param left_node: The first input node providing data.
    :param right_node: The second input node providing data.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors.
    :param name: The optional new name for output node.

    :return: The node performing element-wise check whether left_node is greater than or equal right_node.
    """
    broadcast_attrs = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create(
        "GreaterEqual", [left_node, right_node], broadcast_attrs
    )
1064
+
1065
+
1066
@nameable_op
def grn(data: Node, bias: float, name: Optional[str] = None) -> Node:
    r"""Perform Global Response Normalization with L2 norm (across channels only).

    Computes GRN operation on channels for input tensor:

    \f[ output_i = \dfrac{input_i}{\sqrt{\sum_{i}^{C} input_i}} \f]

    :param data: The node with data tensor.
    :param bias: The bias added to the variance. Scalar value.
    :param name: Optional output node name.
    :return: The new node performing a GRN operation on tensor's channels.
    """
    # Fix: every other op in this module is wrapped in @nameable_op; without it
    # the `name` argument was accepted but silently ignored.
    return _get_node_factory_opset1().create("GRN", [data], {"bias": bias})
1079
+
1080
+
1081
@nameable_op
def group_convolution(
    data: NodeInput,
    filters: NodeInput,
    strides: List[int],
    pads_begin: List[int],
    pads_end: List[int],
    dilations: List[int],
    auto_pad: str = "EXPLICIT",
    name: Optional[str] = None,
) -> Node:
    """Perform Group Convolution operation on data from input node.

    :param data: The node producing input data.
    :param filters: The node producing filters data.
    :param strides: The distance (in pixels) to slide the filter on the feature map
                    over the axes.
    :param pads_begin: The number of pixels to add at the beginning along each axis.
    :param pads_end: The number of pixels to add at the end along each axis.
    :param dilations: The distance in width and height between elements (weights) in the filter.
    :param auto_pad: Describes how to perform padding. Possible values:
                     EXPLICIT: Pad dimensions are explicity specified
                     SAME_LOWER: Pad dimensions computed to match input shape
                                 Ceil(num_dims/2) at the beginning and
                                 Floor(num_dims/2) at the end

                     SAME_UPPER: Pad dimensions computed to match input shape
                                 Floor(num_dims/2) at the beginning and
                                 Ceil(num_dims/2) at the end

                     VALID: No padding
    :param name: Optional output node name.
    :return: The new node performing a Group Convolution operation on tensor from input node.
    """
    group_conv_attrs = {
        "strides": strides,
        "pads_begin": pads_begin,
        "pads_end": pads_end,
        "dilations": dilations,
        "auto_pad": auto_pad.upper(),
    }
    group_conv_inputs = as_nodes(data, filters, name=name)
    return _get_node_factory_opset1().create("GroupConvolution", group_conv_inputs, group_conv_attrs)
1126
+
1127
+
1128
@nameable_op
def group_convolution_backprop_data(
    data: NodeInput,
    filters: NodeInput,
    strides: List[int],
    output_shape: Optional[NodeInput] = None,
    pads_begin: Optional[List[int]] = None,
    pads_end: Optional[List[int]] = None,
    dilations: Optional[List[int]] = None,
    auto_pad: str = "EXPLICIT",
    output_padding: Optional[List[int]] = None,
    name: Optional[str] = None,
) -> Node:
    """Compute the data gradient of a Group Convolution (transposed group convolution).

    :param data: The node producing input data.
    :param filters: The node producing filter data.
    :param strides: The distance (in pixels) to slide the filter on the feature map
                    over the axes.
    :param output_shape: The node that specifies spatial shape of the output.
    :param pads_begin: The number of pixels to add at the beginning along each axis.
    :param pads_end: The number of pixels to add at the end along each axis.
    :param dilations: The distance in width and height between elements (weights)
                      in the filter.
    :param auto_pad: Describes how to perform padding. One of: EXPLICIT (pad
                     dimensions are explicitly specified), SAME_LOWER / SAME_UPPER
                     (pad dimensions computed to match input shape) or VALID
                     (no padding).
    :param output_padding: The additional amount of paddings added per each spatial axis
                           in the output tensor.
    :param name: Optional output node name.
    :return: The new node performing a Group Convolution operation on tensor from input node.
    """
    num_spatial_dims = len(strides)
    # Unspecified dilations / output padding fall back to the identity values.
    dilations = dilations if dilations is not None else [1] * num_spatial_dims
    output_padding = output_padding if output_padding is not None else [0] * num_spatial_dims

    attributes = {
        "strides": strides,
        "dilations": dilations,
        "auto_pad": auto_pad.upper(),
        "output_padding": output_padding,
    }
    inputs = as_nodes(data, filters, name=name)

    if output_shape is None:
        # Explicit paddings are only used when no output shape input is given.
        attributes["pads_begin"] = pads_begin if pads_begin is not None else [0] * num_spatial_dims
        attributes["pads_end"] = pads_end if pads_end is not None else [0] * num_spatial_dims
    else:
        inputs.append(as_node(output_shape, name=name))

    return _get_node_factory_opset1().create("GroupConvolutionBackpropData", inputs, attributes)
1194
+
1195
+
1196
@nameable_op
def hard_sigmoid(
    data: Node,
    alpha: NodeInput,
    beta: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Apply the Hard Sigmoid function element-wise on data from input node.

    Hard Sigmoid computes:

    .. code-block:: python

        y = max(0, min(1, alpha * data + beta))

    :param data: The node with data tensor.
    :param alpha: A node producing the alpha parameter.
    :param beta: A node producing the beta parameter.
    :param name: Optional output node name.
    :return: The new node performing a Hard Sigmoid element-wise on input tensor.
    """
    inputs = [data, as_node(alpha, name=name), as_node(beta, name=name)]
    return _get_node_factory_opset1().create("HardSigmoid", inputs)
1218
+
1219
+
1220
@nameable_op
def interpolate(
    image: Node,
    output_shape: NodeInput,
    attrs: dict,
    name: Optional[str] = None,
) -> Node:
    """Perform interpolation of independent slices in input tensor.

    :param image: The node providing input tensor with data for interpolation.
    :param output_shape: 1D tensor describing output shape for spatial axes.
    :param attrs: The dictionary containing key, value pairs for attributes.
    :param name: Optional name for the output node.
    :return: Node representing interpolation operation.

    Available attributes are:

    * axes          Specify spatial dimension indices where interpolation is applied.
                    Type: List of non-negative integer numbers.
                    Required: yes.

    * mode          Specifies type of interpolation.
                    Range of values: one of {nearest, linear, cubic, area}
                    Type: string
                    Required: yes

    * align_corners A flag that specifies whether to align corners or not. True means the
                    alignment is applied, False means the alignment isn't applied.
                    Range of values: True or False. Default: True.
                    Required: no

    * antialias     A flag that specifies whether to perform anti-aliasing.
                    Range of values: False - do not perform anti-aliasing
                                     True - perform anti-aliasing

                    Default value: False
                    Required: no

    * pads_begin    Specify the number of pixels to add to the beginning of the image being
                    interpolated. A scalar that specifies padding for each spatial dimension.
                    Range of values: list of non-negative integer numbers. Default value: 0
                    Required: no

    * pads_end      Specify the number of pixels to add to the end of the image being
                    interpolated. A scalar that specifies padding for each spatial dimension.
                    Range of values: list of non-negative integer numbers. Default value: 0
                    Required: no

    Example of attribute dictionary:

    .. code-block:: python

        # just required ones
        attrs = {
            'axes': [2, 3],
            'mode': 'cubic',
        }

        attrs = {
            'axes': [2, 3],
            'mode': 'cubic',
            'antialias': True,
            'pads_begin': [2, 2, 2],
        }

    Optional attributes which are absent from dictionary will be set with corresponding default.
    """
    # Each requirement: (attribute name, required?, expected numpy type, value validator).
    requirements = [
        ("axes", True, np.integer, is_non_negative_value),
        ("mode", True, np.str_, None),
        ("align_corners", False, np.bool_, None),
        ("antialias", False, np.bool_, None),
        ("pads_begin", False, np.integer, is_non_negative_value),
        ("pads_end", False, np.integer, is_non_negative_value),
    ]

    check_valid_attributes("Interpolate", attrs, requirements)

    return _get_node_factory_opset1().create("Interpolate", [image, as_node(output_shape, name=name)], attrs)
1299
+
1300
+
1301
@binary_op
def less(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Check element-wise whether the left input node is less than the right one.

    :param left_node: The first input node providing data.
    :param right_node: The second input node providing data.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors.
    :param name: The optional new name for output node.
    :return: The node performing element-wise check whether left_node is less than the right_node.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("Less", [left_node, right_node], attributes)
1322
+
1323
+
1324
@binary_op
def less_equal(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Check element-wise whether the left input node is less than or equal to the right one.

    :param left_node: The first input node providing data.
    :param right_node: The second input node providing data.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors.
    :param name: The optional new name for output node.
    :return: The node performing element-wise check whether left_node is less than or equal the
             right_node.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("LessEqual", [left_node, right_node], attributes)
1346
+
1347
+
1348
@unary_op
def log(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply the natural logarithm to the input node element-wise.

    :param node: The input node providing data for operation.
    :param name: The optional new name for output node.
    :return: The new node performing log operation element-wise.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Log", [node])
1357
+
1358
+
1359
@binary_op
def logical_and(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Perform the logical AND operation on the input nodes element-wise.

    :param left_node: The first input node providing data.
    :param right_node: The second input node providing data.
    :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
                           to output shape axes. Range of values: numpy, explicit.
    :param name: The optional new name for output node.
    :return: The node performing logical and operation on input nodes corresponding elements.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("LogicalAnd", [left_node, right_node], attributes)
1380
+
1381
+
1382
@unary_op
def logical_not(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply logical negation to the input node element-wise.

    :param node: The input node providing data.
    :param name: The optional new name for output node.
    :return: The node performing element-wise logical NOT operation with given tensor.
    """
    factory = _get_node_factory_opset1()
    return factory.create("LogicalNot", [node])
1391
+
1392
+
1393
@binary_op
def logical_or(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Perform the logical OR operation on the input nodes element-wise.

    :param left_node: The first input node providing data.
    :param right_node: The second input node providing data.
    :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
                           to output shape axes. Range of values: numpy, explicit.
    :param name: The optional new name for output node.
    :return: The node performing logical or operation on input nodes corresponding elements.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("LogicalOr", [left_node, right_node], attributes)
1414
+
1415
+
1416
@binary_op
def logical_xor(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Return node which performs logical XOR operation on input nodes element-wise.

    :param left_node: The first input node providing data.
    :param right_node: The second input node providing data.
    :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
                           to output shape axes. Range of values: numpy, explicit.
    :param name: The optional new name for output node.
    :return: The node performing logical xor operation on input nodes corresponding elements.
    """
    return _get_node_factory_opset1().create(
        "LogicalXor",
        [left_node, right_node],
        {"auto_broadcast": auto_broadcast.upper()},
    )
1437
+
1438
+
1439
@nameable_op
def lrn(
    data: NodeInput,
    axes: NodeInput,
    alpha: float = 1,
    beta: float = 0.5,
    bias: float = 1,
    size: int = 5,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs element-wise Local Response Normalization (LRN) operation.

    :param data: Input data.
    :param axes: Node producing the axes along which the normalization is calculated.
    :param alpha: A scale factor (usually positive).
    :param beta: An exponent.
    :param bias: An offset (usually positive) to avoid dividing by 0.
    :param size: Width of the 1-D normalization window.
    :param name: An optional name of the output node.
    :return: The new node which performs LRN.
    """
    attributes = {"alpha": alpha, "beta": beta, "bias": bias, "size": size}
    return _get_node_factory_opset1().create("LRN", as_nodes(data, axes, name=name), attributes)
1461
+
1462
+
1463
@nameable_op
def lstm_cell(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    initial_cell_state: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    activations: Optional[List[str]] = None,
    activations_alpha: Optional[List[float]] = None,
    activations_beta: Optional[List[float]] = None,
    clip: float = 0.0,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs LSTMCell operation.

    :param X: The input tensor with shape: [batch_size, input_size].
    :param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size].
    :param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size].
    :param W: The weight tensor with shape: [4*hidden_size, input_size].
    :param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size].
    :param B: The bias tensor for gates with shape: [4*hidden_size].
    :param hidden_size: Specifies hidden state size.
    :param activations: The list of three activation functions for gates.
    :param activations_alpha: The list of alpha parameters for activation functions.
    :param activations_beta: The list of beta parameters for activation functions.
    :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
    :param name: An optional name of the output node.

    :return: The new node represents LSTMCell. Node outputs count: 2.
    """
    # Defaults follow the classic LSTM gate activations: f/i/o use sigmoid, c uses tanh.
    if activations is None:
        activations = ["sigmoid", "tanh", "tanh"]
    if activations_alpha is None:
        activations_alpha = []
    if activations_beta is None:
        activations_beta = []

    node_inputs = as_nodes(
        X,
        initial_hidden_state,
        initial_cell_state,
        W,
        R,
        B,
        name=name,
    )

    # P - nGraph additional input, no such input in the OV spec.
    # A zero-filled peepholes constant is appended so the underlying op
    # receives its full expected input list; zeros mean "no peephole effect".
    peepholes_count = 3  # nGraph default
    peepholes_shape = [peepholes_count * hidden_size]
    peepholes_array = np.zeros(peepholes_shape)  # nGraph default
    # Peepholes constant must match the element type of the data input X.
    data_dtype = get_dtype(node_inputs[0].get_output_element_type(0))
    default_p = make_constant_node(peepholes_array, dtype=data_dtype)
    node_inputs.append(default_p)

    weights_format = "fico"  # OV LSTMWeightsFormat, no such attribute in the OV spec
    input_forget = False  # nGraph default, no such attribute in the OV spec

    attributes = {
        "hidden_size": hidden_size,
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "clip": clip,
        "weights_format": weights_format,
        "input_forget": input_forget,
    }
    return _get_node_factory_opset1().create("LSTMCell", node_inputs, attributes)
1533
+
1534
+
1535
@deprecated(version="2025.0", message="Use lstm_sequence from opset 5")
@nameable_op
def lstm_sequence(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    initial_cell_state: NodeInput,
    sequence_lengths: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    direction: str,
    activations: Optional[List[str]] = None,
    activations_alpha: Optional[List[float]] = None,
    activations_beta: Optional[List[float]] = None,
    clip: float = 0.0,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs LSTMSequence operation.

    :param X: The input tensor. Shape: [batch_size, seq_length, input_size].
    :param initial_hidden_state: The hidden state tensor.
                                 Shape: [batch_size, num_directions, hidden_size].
    :param initial_cell_state: The cell state tensor.
                               Shape: [batch_size, num_directions, hidden_size].
    :param sequence_lengths: Specifies real sequence lengths for each batch element.
                             Shape: [batch_size]. Integer type.
    :param W: Tensor with weights for matrix multiplication operation with input portion of data.
              Shape: [num_directions, 4*hidden_size, input_size].
    :param R: The tensor with weights for matrix multiplication operation with hidden state.
              Shape: [num_directions, 4*hidden_size, hidden_size].
    :param B: The tensor with biases.
              Shape: [num_directions, 4*hidden_size].
    :param hidden_size: Specifies hidden state size.
    :param direction: Specifies if the RNN is forward, reverse, or bidirectional.
    :param activations: The list of three activation functions for gates.
    :param activations_alpha: The list of alpha parameters for activation functions.
    :param activations_beta: The list of beta parameters for activation functions.
    :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
    :param name: An optional name of the output node.

    :return: The new node represents LSTMSequence. Node outputs count: 3.
    """
    # Defaults follow the classic LSTM gate activations: f/i/o use sigmoid, c uses tanh.
    if activations is None:
        activations = ["sigmoid", "tanh", "tanh"]
    if activations_alpha is None:
        activations_alpha = []
    if activations_beta is None:
        activations_beta = []

    node_inputs = as_nodes(
        X,
        initial_hidden_state,
        initial_cell_state,
        sequence_lengths,
        W,
        R,
        B,
        name=name,
    )

    # P - nGraph additional input, no such input in the OV spec.
    # A zero-filled peepholes constant is appended so the underlying op
    # receives its full expected input list; zeros mean "no peephole effect".
    peepholes_count = 3  # nGraph default
    # The peepholes input carries a per-direction slice, so its first
    # dimension depends on whether the sequence is bidirectional.
    if direction.lower() == "bidirectional":
        num_directions = 2
    else:
        num_directions = 1
    peepholes_shape = [num_directions, peepholes_count * hidden_size]
    peepholes_array = np.zeros(peepholes_shape)  # nGraph default
    # Peepholes constant must match the element type of the data input X.
    data_dtype = get_dtype(node_inputs[0].get_output_element_type(0))
    default_p = make_constant_node(peepholes_array, dtype=data_dtype)
    node_inputs.append(default_p)

    weights_format = "fico"  # OV LSTMWeightsFormat, no such attribute in the OV spec
    input_forget = False  # nGraph default, no such attribute in the OV spec

    attributes = {
        "hidden_size": hidden_size,
        "direction": direction.lower(),
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "clip": clip,
        "weights_format": weights_format,
        "input_forget": input_forget,
    }
    return _get_node_factory_opset1().create("LSTMSequence", node_inputs, attributes)
1622
+
1623
+
1624
@nameable_op
def matmul(
    data_a: NodeInput,
    data_b: NodeInput,
    transpose_a: bool,
    transpose_b: bool,
    name: Optional[str] = None,
) -> Node:
    """Construct a Matrix Multiplication operation node.

    :param data_a: left-hand side matrix
    :param data_b: right-hand side matrix
    :param transpose_a: should the first matrix be transposed before operation
    :param transpose_b: should the second matrix be transposed
    :param name: Optional output node name.
    :return: MatMul operation node
    """
    attributes = {"transpose_a": transpose_a, "transpose_b": transpose_b}
    return _get_node_factory_opset1().create("MatMul", as_nodes(data_a, data_b, name=name), attributes)
1645
+
1646
+
1647
@nameable_op
def max_pool(
    data: NodeInput,
    strides: List[int],
    pads_begin: List[int],
    pads_end: List[int],
    kernel_shape: TensorShape,
    rounding_type: str = "floor",
    auto_pad: Optional[str] = None,
    name: Optional[str] = None,
) -> Node:
    """Perform max pooling operation with given parameters on provided data.

    :param data: The node providing input data.
    :param strides: The distance (in pixels) to slide the filter on the feature map
                    over the axes.
    :param pads_begin: The number of pixels to add at the beginning along each axis.
    :param pads_end: The number of pixels to add at the end along each axis.
    :param kernel_shape: The pooling operation kernel shape.
    :param rounding_type: Determines used rounding schema when computing output shape. Acceptable
                          values are: ['floor', 'ceil']
    :param auto_pad: Determines how the padding is calculated. Acceptable values:
                     [None, 'same_upper', 'same_lower', 'valid']
    :param name: The optional name for the created output node.

    :return: The new node performing max pooling operation.
    """
    # None is treated the same as explicit padding.
    effective_auto_pad = "explicit" if auto_pad is None else auto_pad
    attributes = {
        "strides": strides,
        "pads_begin": pads_begin,
        "pads_end": pads_end,
        "kernel": kernel_shape,
        "rounding_type": rounding_type.upper(),
        "auto_pad": effective_auto_pad.upper(),
    }
    return _get_node_factory_opset1().create("MaxPool", [as_node(data, name=name)], attributes)
1688
+
1689
+
1690
@binary_op
def maximum(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Apply the maximum operation to the input nodes element-wise.

    :param left_node: The first input node for maximum operation.
    :param right_node: The second input node for maximum operation.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors. Defaults to "NUMPY".
    :param name: The optional name for output new node.
    :return: The node performing element-wise maximum operation.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("Maximum", [left_node, right_node], attributes)
1711
+
1712
+
1713
@binary_op
def minimum(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Apply the minimum operation to the input nodes element-wise.

    :param left_node: The first input node for minimum operation.
    :param right_node: The second input node for minimum operation.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors. Defaults to "NUMPY".
    :param name: The optional name for output new node.
    :return: The node performing element-wise minimum operation.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("Minimum", [left_node, right_node], attributes)
1734
+
1735
+
1736
@binary_op
def mod(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Compute the element-wise division remainder of two given tensors.

    :param left_node: The first input node for mod operation.
    :param right_node: The second input node for mod operation.
    :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors.
    :param name: Optional name for output node.
    :return: The node performing element-wise Mod operation.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("Mod", [left_node, right_node], attributes)
1756
+
1757
+
1758
@binary_op
def multiply(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Apply f(A,B) = A*B to the input nodes element-wise.

    :param left_node: The first input node for multiply operation.
    :param right_node: The second input node for multiply operation.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors. Defaults to "NUMPY".
    :param name: The optional name for output new node.
    :return: The node performing element-wise multiplication.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("Multiply", [left_node, right_node], attributes)
1779
+
1780
+
1781
@unary_op
def negative(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply f(x) = -x to the input node element-wise.

    :param node: Input node for negative operation.
    :param name: The optional name for output new node.
    :return: The node performing element-wise multiplication by -1.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Negative", [node])
1790
+
1791
+
1792
@nameable_op
def non_max_suppression(
    boxes: NodeInput,
    scores: NodeInput,
    max_output_boxes_per_class: Optional[NodeInput] = None,
    iou_threshold: Optional[NodeInput] = None,
    score_threshold: Optional[NodeInput] = None,
    box_encoding: str = "corner",
    sort_result_descending: bool = True,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs NonMaxSuppression.

    :param boxes: Tensor with box coordinates.
    :param scores: Tensor with box scores.
    :param max_output_boxes_per_class: Tensor specifying maximum number of boxes
                                       to be selected per class.
    :param iou_threshold: Tensor specifying intersection over union threshold.
    :param score_threshold: Tensor specifying minimum score to consider box for the processing.
    :param box_encoding: Format of boxes data encoding. Range of values: corner or center.
    :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected
                                   boxes across batches or not.
    :param name: The optional name for the output node.
    :return: The new node which performs NonMaxSuppression
    """
    # Omitted optional inputs are replaced with zero-valued constants so the
    # created operation always receives its full five-input list.
    if max_output_boxes_per_class is None:
        max_output_boxes_per_class = make_constant_node(0, np.int64)
    if iou_threshold is None:
        iou_threshold = make_constant_node(0, np.float32)
    if score_threshold is None:
        score_threshold = make_constant_node(0, np.float32)

    inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, name=name)
    attributes = {
        "box_encoding": box_encoding,
        "sort_result_descending": sort_result_descending,
    }

    return _get_node_factory_opset1().create("NonMaxSuppression", inputs, attributes)
1830
+
1831
+
1832
@nameable_op
def normalize_l2(
    data: NodeInput,
    axes: NodeInput,
    eps: float,
    eps_mode: str,
    name: Optional[str] = None,
) -> Node:
    """Construct a NormalizeL2 operation.

    :param data: Node producing the input tensor
    :param axes: Node indicating axes along which L2 reduction is calculated
    :param eps: The epsilon added to L2 norm
    :param eps_mode: how eps is combined with L2 value (`add` or `max`)
    :param name: Optional output node name.
    :return: New node which performs the L2 normalization.
    """
    attributes = {"eps": eps, "mode": eps_mode}
    return _get_node_factory_opset1().create("NormalizeL2", as_nodes(data, axes, name=name), attributes)
1853
+
1854
+
1855
@binary_op
def not_equal(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Check element-wise whether the input nodes are unequal.

    :param left_node: The first input node for not-equal operation.
    :param right_node: The second input node for not-equal operation.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors.
    :param name: The optional name for output new node.
    :return: The node performing element-wise inequality check.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("NotEqual", [left_node, right_node], attributes)
1876
+
1877
+
1878
@nameable_op
def one_hot(
    indices: NodeInput,
    depth: NodeInput,
    on_value: NodeInput,
    off_value: NodeInput,
    axis: int,
    name: Optional[str] = None,
) -> Node:
    """Create node performing one-hot encoding on input data.

    :param indices: Input tensor of rank N with indices of any supported integer data type.
    :param depth: Scalar of any supported integer type that specifies number of classes and
                  the size of one-hot dimension.
    :param on_value: Scalar of any type that is the value that the locations
                     in output tensor represented by indices in input take.
    :param off_value: Scalar of any type that is the value that the locations not represented
                      by indices in input take.
    :param axis: The axis along which the new one-hot dimension is inserted.

    :param name: The optional name for new output node.
    :return: New node performing one-hot operation.
    """
    return _get_node_factory_opset1().create(
        "OneHot",
        as_nodes(indices, depth, on_value, off_value, name=name),
        {"axis": axis},
    )
1905
+
1906
+
1907
@nameable_op
def pad(
    arg: NodeInput,
    pads_begin: NodeInput,
    pads_end: NodeInput,
    pad_mode: str,
    arg_pad_value: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Return a generic padding operation.

    :param arg: The node producing input tensor to be padded.
    :param pads_begin: number of padding elements to be added before position 0
                       on each axis of arg.
    :param pads_end: number of padding elements to be added after the last element.
    :param pad_mode: "constant", "edge", "reflect" or "symmetric"
    :param arg_pad_value: value used for padding if pad_mode is "constant"
    :param name: Optional output node name.
    :return: Pad operation node.
    """
    input_nodes = as_nodes(arg, pads_begin, pads_end, name=name)
    # BUGFIX: test for None explicitly. The previous truthiness check dropped
    # falsy-but-valid pad values (e.g. a scalar 0 passed as the pad value),
    # silently omitting the fourth input of the Pad operation.
    if arg_pad_value is not None:
        input_nodes.append(as_node(arg_pad_value, name=name))

    pad_mode = pad_mode.upper()
    return _get_node_factory_opset1().create("Pad", input_nodes, {"pad_mode": pad_mode})
1932
+
1933
+
1934
@nameable_op
def parameter(
    shape: TensorShape,
    dtype: Union[NumericType, Type] = np.float32,
    name: Optional[str] = None,
) -> Parameter:
    """Return an openvino Parameter object.

    :param shape: The shape of the output tensor.
    :param dtype: The type of elements of the output tensor. Defaults to np.float32.
    :param name: The optional name for output new node.
    :return: The node that specifies input to the model.
    """
    # Numpy types / dtypes are converted to openvino element types;
    # Type instances are passed through unchanged.
    if isinstance(dtype, (type, np.dtype)):
        element_type = get_element_type(dtype)
    else:
        element_type = dtype
    return Parameter(element_type, PartialShape(shape))
1951
+
1952
+
1953
@binary_op
def power(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Perform element-wise exponentiation of the input nodes.

    :param left_node: The node providing the base of operation.
    :param right_node: The node providing the exponent of operation.
    :param auto_broadcast: The type of broadcasting specifies rules used for
                           auto-broadcasting of input tensors.
    :param name: The optional name for the new output node.
    :return: The new node performing element-wise exponentiation operation on input nodes.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("Power", [left_node, right_node], attributes)
1974
+
1975
+
1976
@nameable_op
def prelu(data: NodeInput, slope: NodeInput, name: Optional[str] = None) -> Node:
    """Apply the Parametrized Relu activation element-wise on the input node.

    PRelu uses the following logic:

    .. code-block:: python

        if data < 0:
            data = data * slope
        elif data >= 0:
            data = data

    :param data: The node with data tensor.
    :param slope: The node with the multipliers for negative values.
    :param name: Optional output node name.
    :return: The new node performing a PRelu operation on tensor's channels.
    """
    inputs = as_nodes(data, slope, name=name)
    return _get_node_factory_opset1().create("PRelu", inputs)
1995
+
1996
+
1997
@nameable_op
def prior_box_clustered(
    output_size: Node,
    image_size: NodeInput,
    attrs: dict,
    name: Optional[str] = None,
) -> Node:
    """Generate prior boxes of specified sizes normalized to the input image size.

    :param output_size: 1D tensor with two integer elements [height, width]. Specifies the
                        spatial size of generated grid with boxes.
    :param image_size: 1D tensor with two integer elements [image_height, image_width] that
                       specifies shape of the image for which boxes are generated.
    :param attrs: The dictionary containing key, value pairs for attributes.
    :param name: Optional name for the output node.
    :return: Node representing PriorBoxClustered operation.

    Available attributes are:

    * widths        Specifies desired boxes widths in pixels.
                    Range of values: floating point positive numbers.
                    Default value: 1.0
                    Required: no

    * heights       Specifies desired boxes heights in pixels.
                    Range of values: floating point positive numbers.
                    Default value: 1.0
                    Required: no

    * clip          The flag that denotes if each value in the output tensor should be clipped
                    within [0,1].
                    Range of values: {True, False}
                    Default value: True
                    Required: no

    * step_widths   The distance between box centers.
                    Range of values: floating point positive number
                    Default value: 0.0
                    Required: no

    * step_heights  The distance between box centers.
                    Range of values: floating point positive number
                    Default value: 0.0
                    Required: no

    * offset        The shift of box respectively to the top left corner.
                    Range of values: floating point positive number
                    Default value: None
                    Required: yes

    * variance      Denotes a variance of adjusting bounding boxes.
                    Range of values: floating point positive numbers
                    Default value: []
                    Required: no

    Example of attribute dictionary:

    .. code-block:: python

        # just required ones
        attrs = {
            'offset': 85,
        }

        attrs = {
            'offset': 85,
            'clip': False,
            'step_widths': [1.5, 2.0, 2.5]
        }

    Optional attributes which are absent from dictionary will be set with corresponding default.
    """
    # Each tuple is (attribute name, is_required, expected numpy scalar type,
    # optional value validator) as consumed by check_valid_attributes.
    requirements = [
        ("widths", False, np.floating, is_positive_value),
        ("heights", False, np.floating, is_positive_value),
        ("clip", False, np.bool_, None),
        ("step_widths", False, np.floating, is_positive_value),
        ("step_heights", False, np.floating, is_positive_value),
        ("offset", True, np.floating, is_positive_value),
        ("variance", False, np.floating, is_positive_value),
    ]

    check_valid_attributes("PriorBoxClustered", attrs, requirements)

    return _get_node_factory_opset1().create(
        "PriorBoxClustered",
        [output_size, as_node(image_size, name=name)],
        attrs,
    )
2086
+
2087
+
2088
@nameable_op
def prior_box(
    layer_shape: Node,
    image_shape: NodeInput,
    attrs: dict,
    name: Optional[str] = None,
) -> Node:
    """Generate prior boxes of specified sizes and aspect ratios across all dimensions.

    :param layer_shape: Shape of layer for which prior boxes are computed.
    :param image_shape: Shape of image to which prior boxes are scaled.
    :param attrs: The dictionary containing key, value pairs for attributes.
    :param name: Optional name for the output node.
    :return: Node representing prior box operation.

    Available attributes are:

    * min_size          The minimum box size (in pixels).
                        Range of values: positive floating point numbers
                        Default value: []
                        Required: no

    * max_size          The maximum box size (in pixels).
                        Range of values: positive floating point numbers
                        Default value: []
                        Required: no

    * aspect_ratio      Aspect ratios of prior boxes.
                        Range of values: set of positive floating point numbers
                        Default value: []
                        Required: no

    * flip              The flag that denotes that each aspect_ratio is duplicated and flipped.
                        Range of values: {True, False}
                        Default value: False
                        Required: no

    * clip              The flag that denotes if each value in the output tensor should be clipped
                        to [0,1] interval.
                        Range of values: {True, False}
                        Default value: False
                        Required: no

    * step              The distance between box centers.
                        Range of values: floating point non-negative number
                        Default value: 0
                        Required: no

    * offset            This is a shift of box respectively to top left corner.
                        Range of values: floating point non-negative number
                        Default value: None
                        Required: yes

    * variance          The variance denotes a variance of adjusting bounding boxes. The attribute
                        could contain 0, 1 or 4 elements.
                        Range of values: floating point positive numbers
                        Default value: []
                        Required: no

    * scale_all_sizes   The flag that denotes type of inference.
                        Range of values: False - max_size is ignored
                                         True  - max_size is used

                        Default value: True
                        Required: no

    * fixed_ratio       This is an aspect ratio of a box.
                        Range of values: a list of positive floating-point numbers
                        Default value: None
                        Required: no

    * fixed_size        This is an initial box size (in pixels).
                        Range of values: a list of positive floating-point numbers
                        Default value: None
                        Required: no

    * density           This is the square root of the number of boxes of each type.
                        Range of values: a list of positive floating-point numbers
                        Default value: None
                        Required: no

    Example of attribute dictionary:

    .. code-block:: python

        # just required ones
        attrs = {
            'offset': 85,
        }

        attrs = {
            'offset': 85,
            'flip': True,
            'clip': True,
            'fixed_size': [32, 64, 128]
        }

    Optional attributes which are absent from dictionary will be set with corresponding default.
    """
    # Each tuple is (attribute name, is_required, expected numpy scalar type,
    # optional value validator) as consumed by check_valid_attributes.
    requirements = [
        ("offset", True, np.floating, is_non_negative_value),
        ("min_size", False, np.floating, is_positive_value),
        ("max_size", False, np.floating, is_positive_value),
        ("aspect_ratio", False, np.floating, is_positive_value),
        ("flip", False, np.bool_, None),
        ("clip", False, np.bool_, None),
        ("step", False, np.floating, is_non_negative_value),
        ("variance", False, np.floating, is_positive_value),
        ("scale_all_sizes", False, np.bool_, None),
        ("fixed_ratio", False, np.floating, is_positive_value),
        ("fixed_size", False, np.floating, is_positive_value),
        ("density", False, np.floating, is_positive_value),
    ]

    check_valid_attributes("PriorBox", attrs, requirements)

    return _get_node_factory_opset1().create(
        "PriorBox",
        [layer_shape, as_node(image_shape, name=name)],
        attrs,
    )
2209
+
2210
+
2211
@nameable_op
def proposal(
    class_probs: Node,
    bbox_deltas: Node,
    image_shape: NodeInput,
    attrs: dict,
    name: Optional[str] = None,
) -> Node:
    """Filter bounding boxes and outputs only those with the highest prediction confidence.

    :param class_probs: 4D input floating point tensor with class prediction scores.
    :param bbox_deltas: 4D input floating point tensor with box logits.
    :param image_shape: The 1D input tensor with 3 or 4 elements describing image shape.
    :param attrs: The dictionary containing key, value pairs for attributes.
    :param name: Optional name for the output node.

    :return: Node representing Proposal operation.

    * base_size     The size of the anchor to which scale and ratio attributes are applied.
                    Range of values: a positive unsigned integer number
                    Default value: None
                    Required: yes

    * pre_nms_topn  The number of bounding boxes before the NMS operation.
                    Range of values: a positive unsigned integer number
                    Default value: None
                    Required: yes

    * post_nms_topn The number of bounding boxes after the NMS operation.
                    Range of values: a positive unsigned integer number
                    Default value: None
                    Required: yes

    * nms_thresh    The minimum value of the proposal to be taken into consideration.
                    Range of values: a positive floating-point number
                    Default value: None
                    Required: yes

    * feat_stride   The step size to slide over boxes (in pixels).
                    Range of values: a positive unsigned integer
                    Default value: None
                    Required: yes

    * min_size      The minimum size of box to be taken into consideration.
                    Range of values: a positive unsigned integer number
                    Default value: None
                    Required: yes

    * ratio         The ratios for anchor generation.
                    Range of values: a list of floating-point numbers
                    Default value: None
                    Required: yes

    * scale         The scales for anchor generation.
                    Range of values: a list of floating-point numbers
                    Default value: None
                    Required: yes

    * clip_before_nms   The flag that specifies whether to perform clip bounding boxes before
                        non-maximum suppression or not.
                        Range of values: True or False
                        Default value: True
                        Required: no

    * clip_after_nms    The flag that specifies whether to perform clip bounding boxes after
                        non-maximum suppression or not.
                        Range of values: True or False
                        Default value: False
                        Required: no

    * normalize     The flag that specifies whether to perform normalization of output boxes to
                    [0,1] interval or not.
                    Range of values: True or False
                    Default value: False
                    Required: no

    * box_size_scale    Specifies the scale factor applied to logits of box sizes before decoding.
                        Range of values: a positive floating-point number
                        Default value: 1.0
                        Required: no

    * box_coordinate_scale  Specifies the scale factor applied to logits of box coordinates
                            before decoding.
                            Range of values: a positive floating-point number
                            Default value: 1.0
                            Required: no

    * framework     Specifies how the box coordinates are calculated.
                    Range of values: "" (empty string) - calculate box coordinates like in Caffe*
                                     tensorflow - calculate box coordinates like in the TensorFlow*
                                                  Object Detection API models

                    Default value: "" (empty string)
                    Required: no

    Example of attribute dictionary:

    .. code-block:: python

        # just required ones
        attrs = {
            'base_size': 85,
            'pre_nms_topn': 10,
            'post_nms_topn': 20,
            'nms_thresh': 0.34,
            'feat_stride': 16,
            'min_size': 32,
            'ratio': [0.1, 1.5, 2.0, 2.5],
            'scale': [2, 3, 3, 4],
        }

    Optional attributes which are absent from dictionary will be set with corresponding default.

    """
    # Each tuple is (attribute name, is_required, expected numpy scalar type,
    # optional value validator) as consumed by check_valid_attributes.
    requirements = [
        ("base_size", True, np.unsignedinteger, is_positive_value),
        ("pre_nms_topn", True, np.unsignedinteger, is_positive_value),
        ("post_nms_topn", True, np.unsignedinteger, is_positive_value),
        ("nms_thresh", True, np.floating, is_positive_value),
        ("feat_stride", True, np.unsignedinteger, is_positive_value),
        ("min_size", True, np.unsignedinteger, is_positive_value),
        ("ratio", True, np.floating, None),
        ("scale", True, np.floating, None),
        ("clip_before_nms", False, np.bool_, None),
        ("clip_after_nms", False, np.bool_, None),
        ("normalize", False, np.bool_, None),
        ("box_size_scale", False, np.floating, is_positive_value),
        ("box_coordinate_scale", False, np.floating, is_positive_value),
        ("framework", False, np.str_, None),
    ]

    check_valid_attributes("Proposal", attrs, requirements)

    return _get_node_factory_opset1().create(
        "Proposal",
        [class_probs, bbox_deltas, as_node(image_shape, name=name)],
        attrs,
    )
2349
+
2350
+
2351
@nameable_op
def psroi_pooling(
    input: NodeInput,
    coords: NodeInput,
    output_dim: int,
    group_size: int,
    spatial_scale: float,
    spatial_bins_x: int,
    spatial_bins_y: int,
    mode: str,
    name: Optional[str] = None,
) -> Node:
    """Create a PSROIPooling node.

    :param input: Input feature map `{N, C, ...}`.
    :param coords: Coordinates of bounding boxes.
    :param output_dim: Output channel number.
    :param group_size: Number of groups to encode position-sensitive scores.
    :param spatial_scale: Ratio of input feature map over input image size.
    :param spatial_bins_x: Numbers of bins to divide the input feature maps over.
    :param spatial_bins_y: Numbers of bins to divide the input feature maps over.
    :param mode: Mode of pooling - "avg" or "bilinear".
    :return: PSROIPooling node
    """
    attributes = {
        "output_dim": output_dim,
        "group_size": group_size,
        "spatial_scale": spatial_scale,
        "spatial_bins_x": spatial_bins_x,
        "spatial_bins_y": spatial_bins_y,
        "mode": mode.lower(),  # factory expects a lower-case mode string
    }
    return _get_node_factory_opset1().create(
        "PSROIPooling",
        as_nodes(input, coords, name=name),
        attributes,
    )
2388
+
2389
+
2390
@nameable_op
def range(
    start: Node,
    stop: NodeInput,
    step: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Build a Range node generating a sequence of numbers.

    :param start: The start value of the generated range.
    :param stop: The stop value of the generated range.
    :param step: The step value for the generated range.
    :param name: Optional name for output node.
    :return: Range node
    """
    inputs = as_nodes(start, stop, step, name=name)
    return _get_node_factory_opset1().create("Range", inputs)
2406
+
2407
+
2408
@unary_op
def relu(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply the rectified linear unit function element-wise.

    :param node: One of: input node, array or scalar.
    :param name: The optional output node name.
    :return: The new node performing relu operation on its input element-wise.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Relu", [node])
2417
+
2418
+
2419
@nameable_op
def reduce_logical_and(
    node: NodeInput,
    reduction_axes: NodeInput,
    keep_dims: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Reduce the input tensor with logical AND, eliminating the given axes.

    :param node: The tensor we want to reduce.
    :param reduction_axes: The axes to eliminate through AND operation.
    :param keep_dims: If set to True it holds axes that are used for reduction.
    :param name: Optional name for output node.
    :return: The new node performing reduction operation.
    """
    inputs = as_nodes(node, reduction_axes, name=name)
    attributes = {"keep_dims": keep_dims}
    return _get_node_factory_opset1().create("ReduceLogicalAnd", inputs, attributes)
2439
+
2440
+
2441
@nameable_op
def reduce_logical_or(
    node: NodeInput,
    reduction_axes: NodeInput,
    keep_dims: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Reduce the input tensor with logical OR, eliminating the given axes.

    :param node: The tensor we want to reduce.
    :param reduction_axes: The axes to eliminate through OR operation.
    :param keep_dims: If set to True it holds axes that are used for reduction.
    :param name: Optional name for output node.
    :return: The new node performing reduction operation.
    """
    inputs = as_nodes(node, reduction_axes, name=name)
    attributes = {"keep_dims": keep_dims}
    return _get_node_factory_opset1().create("ReduceLogicalOr", inputs, attributes)
2461
+
2462
+
2463
@nameable_op
def reduce_max(
    node: NodeInput,
    reduction_axes: NodeInput,
    keep_dims: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Max-reduction operation on input tensor, eliminating the specified reduction axes.

    :param node: The tensor we want to max-reduce.
    :param reduction_axes: The axes to eliminate through max operation.
    :param keep_dims: If set to True it holds axes that are used for reduction.
    :param name: Optional name for output node.
    :return: The new node performing max-reduction operation.
    """
    return _get_node_factory_opset1().create(
        "ReduceMax",
        as_nodes(node, reduction_axes, name=name),
        {"keep_dims": keep_dims},
    )
2482
+
2483
+
2484
@nameable_op
def reduce_mean(
    node: NodeInput,
    reduction_axes: NodeInput,
    keep_dims: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Compute the mean of the input tensor, eliminating the given axes.

    :param node: The tensor we want to mean-reduce.
    :param reduction_axes: The axes to eliminate through mean operation.
    :param keep_dims: If set to True it holds axes that are used for reduction.
    :param name: Optional name for output node.
    :return: The new node performing mean-reduction operation.
    """
    inputs = as_nodes(node, reduction_axes, name=name)
    attributes = {"keep_dims": keep_dims}
    return _get_node_factory_opset1().create("ReduceMean", inputs, attributes)
2504
+
2505
+
2506
@nameable_op
def reduce_min(
    node: NodeInput,
    reduction_axes: NodeInput,
    keep_dims: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Min-reduction operation on input tensor, eliminating the specified reduction axes.

    :param node: The tensor we want to min-reduce.
    :param reduction_axes: The axes to eliminate through min operation.
    :param keep_dims: If set to True it holds axes that are used for reduction.
    :param name: Optional name for output node.
    :return: The new node performing min-reduction operation.
    """
    return _get_node_factory_opset1().create(
        "ReduceMin",
        as_nodes(node, reduction_axes, name=name),
        {"keep_dims": keep_dims},
    )
2525
+
2526
+
2527
@nameable_op
def reduce_prod(
    node: NodeInput,
    reduction_axes: NodeInput,
    keep_dims: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Compute the product of the input tensor, eliminating the given axes.

    :param node: The tensor we want to product-reduce.
    :param reduction_axes: The axes to eliminate through product operation.
    :param keep_dims: If set to True it holds axes that are used for reduction.
    :param name: Optional name for output node.
    :return: The new node performing product-reduction operation.
    """
    inputs = as_nodes(node, reduction_axes, name=name)
    attributes = {"keep_dims": keep_dims}
    return _get_node_factory_opset1().create("ReduceProd", inputs, attributes)
2547
+
2548
+
2549
@nameable_op
def reduce_sum(
    node: NodeInput,
    reduction_axes: NodeInput,
    keep_dims: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Sum the input tensor element-wise, eliminating the given reduction axes.

    :param node: The node providing data for operation.
    :param reduction_axes: The axes to eliminate through summation.
    :param keep_dims: If set to True it holds axes that are used for reduction.
    :param name: The optional new name for output node.
    :return: The new node performing summation along `reduction_axes` element-wise.
    """
    inputs = as_nodes(node, reduction_axes, name=name)
    attributes = {"keep_dims": keep_dims}
    return _get_node_factory_opset1().create("ReduceSum", inputs, attributes)
2569
+
2570
+
2571
@nameable_op
def region_yolo(
    input: Node,
    coords: int,
    classes: int,
    num: int,
    do_softmax: bool,
    mask: List[int],
    axis: int,
    end_axis: int,
    anchors: Optional[List[float]] = None,
    name: Optional[str] = None,
) -> Node:
    """Create a RegionYolo node.

    :param input: Input data
    :param coords: Number of coordinates for each region
    :param classes: Number of classes for each region
    :param num: Number of regions
    :param do_softmax: Compute softmax
    :param mask: Mask
    :param axis: Axis to begin softmax on
    :param end_axis: Axis to end softmax on
    :param anchors: A flattened list of pairs `[width, height]` that describes prior box sizes
    :param name: Optional name for output node.
    :return: RegionYolo node
    """
    attributes = {
        "coords": coords,
        "classes": classes,
        "num": num,
        "do_softmax": do_softmax,
        "mask": mask,
        "axis": axis,
        "end_axis": end_axis,
        # the attribute is mandatory, so an omitted anchors list becomes empty
        "anchors": [] if anchors is None else anchors,
    }
    return _get_node_factory_opset1().create("RegionYolo", [input], attributes)
2615
+
2616
+
2617
@nameable_op
def reshape(
    node: NodeInput,
    output_shape: NodeInput,
    special_zero: bool,
    name: Optional[str] = None,
) -> Node:
    """Reshape the input tensor according to the provided parameters.

    :param node: The tensor we want to reshape.
    :param output_shape: The node with a new shape for input tensor.
    :param special_zero: The boolean variable that controls how zero values in shape are
                         interpreted. If special_zero is false, then 0 is interpreted as-is
                         which means that output shape will contain a zero dimension at the
                         specified location. Input and output tensors are empty in this case.
                         If special_zero is true, then all zeros in shape implies the copying
                         of corresponding dimensions from data.shape into the output shape.
                         Range of values: False or True
    :return: The node reshaping an input tensor.
    """
    inputs = as_nodes(node, output_shape, name=name)
    attributes = {"special_zero": special_zero}
    return _get_node_factory_opset1().create("Reshape", inputs, attributes)
2642
+
2643
+
2644
@unary_op
def result(data: NodeInput, name: Optional[str] = None) -> Node:
    """Mark a graph (Model) output with a Result node.

    :param data: The tensor containing the input data
    :return: Result node
    """
    factory = _get_node_factory_opset1()
    return factory.create("Result", [data])
2652
+
2653
+
2654
@nameable_op
def reverse_sequence(
    input: NodeInput,
    seq_lengths: NodeInput,
    batch_axis: NumericData,
    seq_axis: NumericData,
    name: Optional[str] = None,
) -> Node:
    """Create a ReverseSequence node.

    :param input: tensor with input data to reverse
    :param seq_lengths: 1D tensor of integers with sequence lengths in the input tensor.
    :param batch_axis: index of the batch dimension.
    :param seq_axis: index of the sequence dimension.
    :return: ReverseSequence node
    """
    inputs = as_nodes(input, seq_lengths, name=name)
    attributes = {"batch_axis": batch_axis, "seq_axis": seq_axis}
    return _get_node_factory_opset1().create("ReverseSequence", inputs, attributes)
2675
+
2676
+
2677
@nameable_op
def select(
    cond: NodeInput,
    then_node: NodeInput,
    else_node: NodeInput,
    auto_broadcast: str = "numpy",
    name: Optional[str] = None,
) -> Node:
    """Select elements from two tensors based on a boolean mask, element-wise.

    :param cond: Tensor with selection mask of type `boolean`.
    :param then_node: Tensor providing data to be selected if respective `cond`
                      item value is `True`.
    :param else_node: Tensor providing data to be selected if respective `cond`
                      item value is `False`.
    :param auto_broadcast: Mode specifies rules used for auto-broadcasting of input tensors.
    :param name: The optional new name for output node.
    :return: The new node with values selected according to provided arguments.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create(
        "Select",
        as_nodes(cond, then_node, else_node, name=name),
        attributes,
    )
2702
+
2703
+
2704
@nameable_op
def selu(
    data: NodeInput,
    alpha: NodeInput,
    lambda_value: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Perform a Scaled Exponential Linear Unit (SELU) operation on input node element-wise.

    :param data: input node, array or scalar.
    :param alpha: Alpha coefficient of SELU operation
    :param lambda_value: Lambda coefficient of SELU operation
    :param name: The optional output node name.
    :return: The new node performing selu operation on its input element-wise.
    """
    return _get_node_factory_opset1().create(
        "Selu",
        as_nodes(data, alpha, lambda_value, name=name),
    )
2723
+
2724
+
2725
@nameable_op
def shape_of(data: NodeInput, name: Optional[str] = None) -> Node:
    """Produce a tensor containing the shape of the input data.

    :param data: The tensor containing the input data.
    :return: ShapeOf node
    """
    inputs = [as_node(data, name=name)]
    return _get_node_factory_opset1().create("ShapeOf", inputs)
2733
+
2734
+
2735
@unary_op
def sigmoid(data: NodeInput, name: Optional[str] = None) -> Node:
    """Apply the sigmoid function element-wise.

    :param data: The tensor containing the input data
    :return: Sigmoid node
    """
    factory = _get_node_factory_opset1()
    return factory.create("Sigmoid", [data])
2743
+
2744
+
2745
@unary_op
def sign(node: NodeInput, name: Optional[str] = None) -> Node:
    """Compute the element-wise sign of the input.

    :param node: One of: input node, array or scalar.
    :param name: The optional new name for output node.
    :return: The node with mapped elements of the input tensor to -1 (if it is negative),
             0 (if it is zero), or 1 (if it is positive).
    """
    factory = _get_node_factory_opset1()
    return factory.create("Sign", [node])
2755
+
2756
+
2757
@unary_op
def sin(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply the sine function element-wise on the input node.

    :param node: One of: input node, array or scalar.
    :param name: Optional new name for output node.
    :return: New node with sin operation applied on it.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Sin", [node])
2766
+
2767
+
2768
@unary_op
def sinh(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply hyperbolic sine function on the input node element-wise.

    :param node: One of: input node, array or scalar.
    :param name: Optional new name for output node.
    :return: New node with sinh operation applied on it.
    """
    return _get_node_factory_opset1().create("Sinh", [node])
2777
+
2778
+
2779
@nameable_op
def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node:
    """Compute softmax over the given axis of the input tensor.

    :param data: The tensor providing input data.
    :param axis: An axis along which Softmax should be calculated
    :return: The new node with softmax operation applied on each element.
    """
    inputs = [as_node(data, name=name)]
    return _get_node_factory_opset1().create("Softmax", inputs, {"axis": axis})
2788
+
2789
+
2790
@nameable_op
def space_to_depth(data: Node, mode: str, block_size: int = 1, name: Optional[str] = None) -> Node:
    """Perform SpaceToDepth operation on the input tensor.

    SpaceToDepth rearranges blocks of spatial data into depth.
    The operator returns a copy of the input tensor where values from the height
    and width dimensions are moved to the depth dimension.

    :param data: The node with data tensor.
    :param mode: Specifies how the output depth dimension is gathered from block coordinates.

                 blocks_first: The output depth is gathered from [block_size, ..., block_size, C]
                 depth_first: The output depth is gathered from [C, block_size, ..., block_size]

    :param block_size: The size of the block of values to be moved. Scalar value.
    :param name: Optional output node name.
    :return: The new node performing a SpaceToDepth operation on input tensor.
    """
    return _get_node_factory_opset1().create(
        "SpaceToDepth",
        [data],
        {"mode": mode, "block_size": block_size},
    )
2813
+
2814
+
2815
@nameable_op
def split(data: NodeInput, axis: NodeInput, num_splits: int, name: Optional[str] = None) -> Node:
    """Split the input tensor into same-length slices along an axis.

    :param data: The input tensor to be split
    :param axis: Axis along which the input data will be split
    :param num_splits: Number of the output tensors that should be produced
    :return: Split node
    """
    inputs = as_nodes(data, axis, name=name)
    attributes = {"num_splits": num_splits}
    return _get_node_factory_opset1().create("Split", inputs, attributes)
2829
+
2830
+
2831
@unary_op
def sqrt(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply the square root element-wise on the input node.

    :param node: One of: input node, array or scalar.
    :param name: Optional new name for output node.
    :return: The new node with sqrt operation applied element-wise.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Sqrt", [node])
2840
+
2841
+
2842
@binary_op
def squared_difference(
    x1: NodeInput,
    x2: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    r"""Compute the element-wise squared difference of two tensors.

    \f[ y[i] = (x_1[i] - x_2[i])^2 \f]

    :param x1: The node with first input tensor.
    :param x2: The node with second input tensor.
    :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
                           to output shape axes. Range of values: numpy, explicit.
    :param name: Optional new name for output node.
    :return: The new node performing a squared difference between two tensors.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("SquaredDifference", [x1, x2], attributes)
2865
+
2866
+
2867
@nameable_op
def squeeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Node:
    """Remove the selected single-dimensional entries from the shape of a tensor.

    Takes a parameter `axes` with a list of axes to squeeze.
    If `axes` is not provided, all the single dimensions will be removed from the shape.
    If an `axis` is selected with shape entry not equal to one, an error is raised.

    For example:

        Inputs: tensor with shape [1, 2, 1, 3, 1, 1], axes=[2, 4]

        Result: tensor with shape [1, 2, 3, 1]

    :param data: The node with data tensor.
    :param axes: List of non-negative integers, indicate the dimensions to squeeze.
                  One of: input node or array.
    :param name: Optional new name for output node.
    :return: The new node performing a squeeze operation on input tensor.
    """
    inputs = as_nodes(data, axes, name=name)
    return _get_node_factory_opset1().create("Squeeze", inputs)
2890
+
2891
+
2892
@nameable_op
def strided_slice(
    data: NodeInput,
    begin: NodeInput,
    end: NodeInput,
    strides: NodeInput,
    begin_mask: List[int],
    end_mask: List[int],
    new_axis_mask: Optional[List[int]] = None,
    shrink_axis_mask: Optional[List[int]] = None,
    ellipsis_mask: Optional[List[int]] = None,
    name: Optional[str] = None,
) -> Node:
    """Return a node which extracts a strided slice of the input data tensor.

    (Docstring fix: the previous summary was copy-pasted from `tile` and wrongly
    described this op as repeating the input tensor.)

    :param data: The tensor to be sliced
    :param begin: 1D tensor with begin indexes for input blob slicing
    :param end: 1D tensor with end indexes for input blob slicing
    :param strides: The slicing strides
    :param begin_mask: A mask applied to the 'begin' input indicating which elements
                       should be ignored
    :param end_mask: A mask applied to the 'end' input indicating which elements
                     should be ignored
    :param new_axis_mask: A mask indicating dimensions where '1' should be inserted
    :param shrink_axis_mask: A mask indicating which dimensions should be deleted
    :param ellipsis_mask: Indicates positions where missing dimensions should be inserted
    :return: StridedSlice node
    """
    # Mutable-default-safe: the empty-list defaults are materialized per call.
    if new_axis_mask is None:
        new_axis_mask = []
    if shrink_axis_mask is None:
        shrink_axis_mask = []
    if ellipsis_mask is None:
        ellipsis_mask = []
    attributes = {
        "begin_mask": begin_mask,
        "end_mask": end_mask,
        "new_axis_mask": new_axis_mask,
        "shrink_axis_mask": shrink_axis_mask,
        "ellipsis_mask": ellipsis_mask,
    }

    return _get_node_factory_opset1().create(
        "StridedSlice",
        as_nodes(data, begin, end, strides, name=name),
        attributes,
    )
2939
+
2940
+
2941
@binary_op
def subtract(
    left_node: NodeInput,
    right_node: NodeInput,
    auto_broadcast: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Create a node computing f(x) = A-B over the inputs element-wise.

    :param left_node: The node providing data for left hand side of operator.
    :param right_node: The node providing data for right hand side of operator.
    :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
                           to output shape axes. Range of values: numpy, explicit.
    :param name: The optional name for output node.
    :return: The new output node performing subtraction operation on both tensors element-wise.
    """
    attributes = {"auto_broadcast": auto_broadcast.upper()}
    return _get_node_factory_opset1().create("Subtract", [left_node, right_node], attributes)
2962
+
2963
+
2964
@unary_op
def tan(node: NodeInput, name: Optional[str] = None) -> Node:
    """Compute the tangent of the input element-wise.

    :param node: One of: input node, array or scalar.
    :param name: Optional new name for output node.
    :return: New node with tan operation applied on it.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Tan", [node])
2973
+
2974
+
2975
@unary_op
def tanh(node: NodeInput, name: Optional[str] = None) -> Node:
    """Compute the hyperbolic tangent of the input element-wise.

    :param node: One of: input node, array or scalar.
    :param name: Optional new name for output node.
    :return: New node with tanh operation applied on it.
    """
    factory = _get_node_factory_opset1()
    return factory.create("Tanh", [node])
2984
+
2985
+
2986
@nameable_op
def tile(data: NodeInput, repeats: NodeInput, name: Optional[str] = None) -> Node:
    """Return a node which dynamically repeats(replicates) the input data tensor.

    :param data: The input tensor to be tiled
    :param repeats: Per-dimension replication factors
    :return: Tile node
    """
    inputs = as_nodes(data, repeats, name=name)
    return _get_node_factory_opset1().create("Tile", inputs)
2995
+
2996
+
2997
@nameable_op
def topk(
    data: NodeInput,
    k: NodeInput,
    axis: int,
    mode: str,
    sort: str,
    name: Optional[str] = None,
) -> Node:
    """Create a TopK node selecting K extreme elements along the given axis.

    :param data: Input data.
    :param k: K.
    :param axis: TopK Axis.
    :param mode: Compute TopK largest ('max') or smallest ('min')
    :param sort: Order of output elements (sort by: 'none', 'index' or 'value')
    :return: The new node which performs TopK (both indices and values)
    """
    attributes = {"axis": axis, "mode": mode, "sort": sort}
    inputs = as_nodes(data, k, name=name)
    return _get_node_factory_opset1().create("TopK", inputs, attributes)
3020
+
3021
+
3022
@nameable_op
def transpose(data: NodeInput, input_order: NodeInput, name: Optional[str] = None) -> Node:
    """Permute the axes of the input tensor according to the given order.

    :param data: The input tensor to be transposed
    :param input_order: Permutation of axes to be applied to the input tensor
    :return: Transpose node
    """
    inputs = as_nodes(data, input_order, name=name)
    return _get_node_factory_opset1().create("Transpose", inputs)
3031
+
3032
+
3033
@nameable_op
def unsqueeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Node:
    """Perform unsqueeze operation on input tensor.

    Insert single-dimensional entries to the shape of a tensor. Takes one required argument axes,
    a list of dimensions that will be inserted.
    Dimension indices in axes are as seen in the output tensor.

    For example: Inputs: tensor with shape [3, 4, 5], axes=[0, 4]
                 Result: tensor with shape [1, 3, 4, 5, 1]

    :param data: The node with data tensor.
    :param axes: List of non-negative integers, indicate the dimensions to be inserted.
                  One of: input node or array.
    :return: The new node performing an unsqueeze operation on input tensor.
    """
    # Fix: added the @nameable_op decorator for consistency with every other op
    # in this module (squeeze, tile, transpose, ...); behavior for existing
    # callers is unchanged.
    return _get_node_factory_opset1().create("Unsqueeze", as_nodes(data, axes, name=name))
3049
+
3050
+
3051
@nameable_op
def variadic_split(
    data: NodeInput,
    axis: NodeInput,
    split_lengths: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Split the input tensor along `axis` into slices of the requested lengths.

    :param data: The input tensor to be split
    :param axis: Axis along which the input data will be split
    :param split_lengths: Sizes of the output tensors along the split axis
    :return: VariadicSplit node
    """
    inputs = as_nodes(data, axis, split_lengths, name=name)
    return _get_node_factory_opset1().create("VariadicSplit", inputs)