bigdl-core-npu 2.6.0b20250114__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (234)
  1. bigdl-core-npu/__init__.py +0 -0
  2. bigdl-core-npu/include/common.h +96 -0
  3. bigdl-core-npu/include/npu_llm.h +74 -0
  4. bigdl-core-npu/npu_llm.dll +0 -0
  5. bigdl-core-npu/npu_llm.lib +0 -0
  6. bigdl_core_npu-2.6.0b20250114.dist-info/METADATA +44 -0
  7. bigdl_core_npu-2.6.0b20250114.dist-info/RECORD +234 -0
  8. bigdl_core_npu-2.6.0b20250114.dist-info/WHEEL +5 -0
  9. bigdl_core_npu-2.6.0b20250114.dist-info/top_level.txt +2 -0
  10. intel_npu_acceleration_library/__init__.py +24 -0
  11. intel_npu_acceleration_library/_version.py +6 -0
  12. intel_npu_acceleration_library/backend/__init__.py +37 -0
  13. intel_npu_acceleration_library/backend/base.py +250 -0
  14. intel_npu_acceleration_library/backend/bindings.py +383 -0
  15. intel_npu_acceleration_library/backend/compression.py +24 -0
  16. intel_npu_acceleration_library/backend/convolution.py +58 -0
  17. intel_npu_acceleration_library/backend/factory.py +1161 -0
  18. intel_npu_acceleration_library/backend/linear.py +60 -0
  19. intel_npu_acceleration_library/backend/matmul.py +59 -0
  20. intel_npu_acceleration_library/backend/mlp.py +58 -0
  21. intel_npu_acceleration_library/backend/ops.py +142 -0
  22. intel_npu_acceleration_library/backend/qlinear.py +75 -0
  23. intel_npu_acceleration_library/backend/qmatmul.py +66 -0
  24. intel_npu_acceleration_library/backend/runtime.py +215 -0
  25. intel_npu_acceleration_library/backend/sdpa.py +107 -0
  26. intel_npu_acceleration_library/backend/tensor.py +1120 -0
  27. intel_npu_acceleration_library/backend/utils.py +70 -0
  28. intel_npu_acceleration_library/compiler.py +194 -0
  29. intel_npu_acceleration_library/device.py +230 -0
  30. intel_npu_acceleration_library/dtypes.py +155 -0
  31. intel_npu_acceleration_library/external/openvino/__init__.py +72 -0
  32. intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +21 -0
  33. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
  34. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
  35. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
  36. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
  37. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
  38. intel_npu_acceleration_library/external/openvino/experimental/__init__.py +14 -0
  39. intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
  40. intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
  41. intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
  42. intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +293 -0
  43. intel_npu_acceleration_library/external/openvino/frontend/jax/passes.py +65 -0
  44. intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +182 -0
  45. intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
  46. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
  47. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
  48. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
  49. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
  50. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
  51. intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
  52. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
  53. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
  54. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
  55. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
  56. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
  57. intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
  58. intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +370 -0
  59. intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +180 -0
  60. intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
  61. intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +118 -0
  62. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
  63. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
  64. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
  65. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
  66. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
  67. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +131 -0
  68. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
  69. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
  70. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
  71. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
  72. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +290 -0
  73. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +126 -0
  74. intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +568 -0
  75. intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +258 -0
  76. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
  77. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
  78. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
  79. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
  80. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
  81. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
  82. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
  83. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
  84. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +481 -0
  85. intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
  86. intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
  87. intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
  88. intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +28 -0
  89. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
  90. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
  91. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +5 -0
  92. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
  93. intel_npu_acceleration_library/external/openvino/properties/__init__.py +22 -0
  94. intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
  95. intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
  96. intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
  97. intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
  98. intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
  99. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
  100. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
  101. intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
  102. intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
  103. intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
  104. intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
  105. intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
  106. intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +19 -0
  107. intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
  108. intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
  109. intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3068 -0
  110. intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
  111. intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
  112. intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
  113. intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
  114. intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
  115. intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
  116. intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
  117. intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +398 -0
  118. intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
  119. intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
  120. intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +17 -0
  121. intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +276 -0
  122. intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
  123. intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
  124. intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
  125. intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
  126. intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
  127. intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
  128. intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
  129. intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
  130. intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
  131. intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +215 -0
  132. intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
  133. intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
  134. intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
  135. intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +787 -0
  136. intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
  137. intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
  138. intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
  139. intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
  140. intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
  141. intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
  142. intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +40 -0
  143. intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
  144. intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
  145. intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
  146. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
  147. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +447 -0
  148. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
  149. intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +156 -0
  150. intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
  151. intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
  152. intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
  153. intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
  154. intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
  155. intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
  156. intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
  157. intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
  158. intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
  159. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
  160. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
  161. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
  162. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
  163. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
  164. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
  165. intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
  166. intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
  167. intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
  168. intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
  169. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
  170. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +550 -0
  171. intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
  172. intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
  173. intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
  174. intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
  175. intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
  176. intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +40 -0
  177. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
  178. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
  179. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
  180. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
  181. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
  182. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
  183. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
  184. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
  185. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
  186. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +298 -0
  187. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
  188. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +214 -0
  189. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
  190. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
  191. intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
  192. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
  193. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
  194. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
  195. intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +196 -0
  196. intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
  197. intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
  198. intel_npu_acceleration_library/external/openvino/utils.py +115 -0
  199. intel_npu_acceleration_library/functional/__init__.py +8 -0
  200. intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
  201. intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
  202. intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
  203. intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
  204. intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
  205. intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
  206. intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
  207. intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
  208. intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
  209. intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
  210. intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
  211. intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
  212. intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
  213. intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
  214. intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
  215. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
  216. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
  217. intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
  218. intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
  219. intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
  220. intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
  221. intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
  222. intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
  223. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
  224. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
  225. intel_npu_acceleration_library/modelling.py +150 -0
  226. intel_npu_acceleration_library/nn/__init__.py +20 -0
  227. intel_npu_acceleration_library/nn/autograd.py +68 -0
  228. intel_npu_acceleration_library/nn/conv.py +257 -0
  229. intel_npu_acceleration_library/nn/functional.py +1207 -0
  230. intel_npu_acceleration_library/nn/linear.py +162 -0
  231. intel_npu_acceleration_library/nn/llm.py +417 -0
  232. intel_npu_acceleration_library/nn/module.py +393 -0
  233. intel_npu_acceleration_library/optimizations.py +157 -0
  234. intel_npu_acceleration_library/quantization.py +174 -0
@@ -0,0 +1,25 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (C) 2018-2024 Intel Corporation
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ # Enums
6
+ from openvino._pyopenvino.properties.hint import Priority
7
+ from openvino._pyopenvino.properties.hint import SchedulingCoreType
8
+ from openvino._pyopenvino.properties.hint import ModelDistributionPolicy
9
+ from openvino._pyopenvino.properties.hint import ExecutionMode
10
+ from openvino._pyopenvino.properties.hint import PerformanceMode
11
+
12
+ # Properties
13
+ from openvino._pyopenvino.properties.hint import inference_precision
14
+ from openvino._pyopenvino.properties.hint import model_priority
15
+ from openvino._pyopenvino.properties.hint import performance_mode
16
+ from openvino._pyopenvino.properties.hint import enable_cpu_pinning
17
+ from openvino._pyopenvino.properties.hint import scheduling_core_type
18
+ from openvino._pyopenvino.properties.hint import model_distribution_policy
19
+ from openvino._pyopenvino.properties.hint import enable_hyper_threading
20
+ from openvino._pyopenvino.properties.hint import execution_mode
21
+ from openvino._pyopenvino.properties.hint import num_requests
22
+ from openvino._pyopenvino.properties.hint import model
23
+ from openvino._pyopenvino.properties.hint import allow_auto_batching
24
+ from openvino._pyopenvino.properties.hint import dynamic_quantization_group_size
25
+ from openvino._pyopenvino.properties.hint import kv_cache_precision
@@ -0,0 +1,7 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (C) 2018-2024 Intel Corporation
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ """Generic utilities. Factor related functions out to separate files."""
6
+
7
+ from openvino._pyopenvino.util import numpy_to_c, replace_node, replace_output_update_name
@@ -0,0 +1,44 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (C) 2018-2024 Intel Corporation
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ import logging
6
+ from typing import List, Optional
7
+
8
+ from openvino.runtime import AxisSet, Node
9
+ from openvino.runtime.utils.types import (
10
+ NodeInput,
11
+ TensorShape,
12
+ get_dtype,
13
+ make_constant_node,
14
+ )
15
+
16
# Module-level logger for the broadcasting helpers in this file.
log = logging.getLogger(__name__)
17
+
18
+
19
def get_broadcast_axes(
    output_shape: TensorShape,
    input_shape: TensorShape,
    axis: Optional[int] = None,
) -> AxisSet:
    """Compute which output axes are newly added by an openvino broadcast.

    A broadcast "adds" axes to the input tensor, replicating input elements
    to fill the new dimensions; this returns the indices of those added axes.

    :param output_shape: The new shape for the output tensor.
    :param input_shape: The shape of input tensor.
    :param axis: The axis along which we want to replicate elements.

    returns: The indices of added axes.
    """
    # When no axis is given, the input aligns with the rightmost output axes.
    start = len(output_shape) - len(input_shape) if axis is None else axis
    remaining = list(range(len(output_shape)))
    # Remove the axes covered by the input shape; deleting from the right
    # keeps earlier positions valid (and preserves negative-index behavior).
    for position in reversed(range(start, start + len(input_shape))):
        del remaining[position]
    return AxisSet(set(remaining))
@@ -0,0 +1,8 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (C) 2018-2024 Intel Corporation
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from openvino.runtime.utils.data_helpers.data_dispatcher import _data_dispatch
6
+ from openvino.runtime.utils.data_helpers.wrappers import tensor_from_file
7
+ from openvino.runtime.utils.data_helpers.wrappers import _InferRequestWrapper
8
+ from openvino.runtime.utils.data_helpers.wrappers import OVDict
@@ -0,0 +1,447 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (C) 2018-2024 Intel Corporation
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from functools import singledispatch
6
+ from typing import Any, Dict, Union, Optional
7
+
8
+ import numpy as np
9
+
10
+ from openvino._pyopenvino import ConstOutput, Tensor, Type, RemoteTensor
11
+ from openvino.runtime.utils.data_helpers.wrappers import _InferRequestWrapper, OVDict
12
+
13
# Type aliases describing the value categories the dispatchers below accept.
ContainerTypes = Union[dict, list, tuple, OVDict]  # container-shaped inputs
ScalarTypes = Union[np.number, int, float]  # scalar inputs
ValidKeys = Union[str, int, ConstOutput]  # ways to address a model input
16
+
17
+
18
def is_list_simple_type(input_list: list) -> bool:
    """Return True if the list holds only str/float/int/bytes values.

    One level of nesting is allowed: an item may itself be a list whose
    elements are checked against the same simple types.
    """
    for item in input_list:
        candidates = item if isinstance(item, list) else [item]
        for element in candidates:
            if not isinstance(element, (str, float, int, bytes)):
                return False
    return True
28
+
29
+
30
def get_request_tensor(
    request: _InferRequestWrapper,
    key: Optional[ValidKeys] = None,
) -> Tensor:
    """Fetch the tensor addressed by `key` from the infer request.

    No key selects the (single) input tensor; an int selects an input by
    index; a name or port looks the tensor up directly.
    """
    if key is None:
        return request.get_input_tensor()
    if isinstance(key, int):
        return request.get_input_tensor(key)
    if isinstance(key, (str, ConstOutput)):
        return request.get_tensor(key)
    raise TypeError(f"Unsupported key type: {type(key)} for Tensor under key: {key}")
42
+
43
+
44
@singledispatch
def value_to_tensor(
    value: Union[Tensor, np.ndarray, ScalarTypes, str],
    request: Optional[_InferRequestWrapper] = None,
    is_shared: bool = False,
    key: Optional[ValidKeys] = None,
) -> None:
    """Fallback of the single-dispatch conversion of a value to a Tensor.

    Reached only when `value` matches none of the registered overloads
    (Tensor, RemoteTensor, np.ndarray, list, scalar types, str, bytes).

    :param value: The value to convert.
    :param request: Infer request whose bound tensor defines the target type.
    :param is_shared: Whether the result may share memory with `value`.
    :param key: Identifier (index, name, or port) of the target tensor.
    :raises TypeError: always — conversion lives in the registered overloads.
    """
    raise TypeError(f"Incompatible inputs of type: {type(value)}")
52
+
53
+
54
@value_to_tensor.register(Tensor)
def _(
    value: Tensor,
    request: Optional[_InferRequestWrapper] = None,
    is_shared: bool = False,
    key: Optional[ValidKeys] = None,
) -> Tensor:
    # Already a Tensor: passed through unchanged; `is_shared` has no effect.
    return value
62
+
63
+
64
@value_to_tensor.register(RemoteTensor)
def _(
    value: RemoteTensor,
    request: Optional[_InferRequestWrapper] = None,
    is_shared: bool = False,
    key: Optional[ValidKeys] = None,
) -> RemoteTensor:
    # RemoteTensor is device-side; passed through unchanged, no copy here.
    return value
72
+
73
+
74
@value_to_tensor.register(np.ndarray)
def _(
    value: np.ndarray,
    request: _InferRequestWrapper,
    is_shared: bool = False,
    key: Optional[ValidKeys] = None,
) -> Tensor:
    """Convert a numpy array to a Tensor typed/shaped for the request input.

    `is_shared=True` is honored only when the array already matches the
    target dtype and is writeable; every edge-case below copies instead.
    """
    # Target type/dtype come from the tensor bound to `key` on the request.
    tensor = get_request_tensor(request, key)
    tensor_type = tensor.get_element_type()
    tensor_dtype = tensor_type.to_dtype()
    # String edge-case, always copy.
    # Scalars are also handled by C++.
    if tensor_type == Type.string:
        return Tensor(value, shared_memory=False)
    # Scalars edge-case: a 0-d array may need converting/reshaping to the
    # target tensor's shape before wrapping.
    if value.ndim == 0:
        tensor_shape = tuple(tensor.shape)
        if tensor_dtype == value.dtype and tensor_shape == value.shape:
            return Tensor(value, shared_memory=is_shared)
        elif tensor.size == 0:
            # the first infer request for dynamic input cannot reshape to 0 shape
            return Tensor(value.astype(tensor_dtype).reshape((1)), shared_memory=False)
        else:
            return Tensor(value.astype(tensor_dtype).reshape(tensor_shape), shared_memory=False)
    # WA for FP16-->BF16 edge-case, always copy: the raw bits are
    # reinterpreted (`view`) into a fresh bf16 tensor, not value-converted.
    if tensor_type == Type.bf16:
        tensor = Tensor(tensor_type, value.shape)
        tensor.data[:] = value.view(tensor_dtype)
        return tensor
    # WA for "not writeable" edge-case, always copy.
    if value.flags["WRITEABLE"] is False:
        tensor = Tensor(tensor_type, value.shape)
        tensor.data[:] = value.astype(tensor_dtype) if tensor_dtype != value.dtype else value
        return tensor
    # If types are mismatched, convert and always copy.
    if tensor_dtype != value.dtype:
        return Tensor(value.astype(tensor_dtype), shared_memory=False)
    # Otherwise, use mode defined in the call.
    return Tensor(value, shared_memory=is_shared)
113
+
114
+
115
@value_to_tensor.register(list)
def _(
    value: list,
    request: _InferRequestWrapper,
    is_shared: bool = False,
    key: Optional[ValidKeys] = None,
) -> Tensor:
    # Lists go straight to the Tensor constructor; `is_shared` and `key` are
    # ignored — a Python list cannot share memory with a Tensor.
    return Tensor(value)
123
+
124
+
125
@value_to_tensor.register(np.number)
@value_to_tensor.register(int)
@value_to_tensor.register(float)
@value_to_tensor.register(str)
@value_to_tensor.register(bytes)
def _(
    value: Union[ScalarTypes, str, bytes],
    request: _InferRequestWrapper,
    is_shared: bool = False,
    key: Optional[ValidKeys] = None,
) -> Tensor:
    """Convert a scalar/str/bytes value to a Tensor; a copy occurs either way.

    For string targets the raw array is kept — the Tensor class converts the
    data itself; otherwise a dtype mismatch is resolved here via `astype`.
    """
    target_type = get_request_tensor(request, key).get_element_type()
    target_dtype = target_type.to_dtype()
    buffer = np.array(value)
    if target_type != Type.string and target_dtype != buffer.dtype:
        buffer = buffer.astype(target_dtype)
    return Tensor(buffer, shared_memory=False)
145
+
146
+
147
def to_c_style(value: Any, is_shared: bool = False) -> Any:
    """Return `value` as a C-contiguous array when possible.

    Existing arrays are made contiguous in place (no copy if already so);
    array-interface objects are converted (copied when not shared); anything
    else is returned untouched.
    """
    if isinstance(value, np.ndarray):
        return value if value.flags["C_CONTIGUOUS"] else np.ascontiguousarray(value)
    if not hasattr(value, "__array__"):
        return value
    if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
        # https://numpy.org/devdocs/numpy_2_0_migration_guide.html#adapting-to-changes-in-the-copy-keyword
        return to_c_style(np.asarray(value), is_shared) if is_shared else np.asarray(value, copy=True)  # type: ignore
    return to_c_style(np.array(value, copy=False), is_shared) if is_shared else np.array(value, copy=True)
157
+
158
+
159
+ ###
160
+ # Start of array normalization.
161
+ ###
162
@singledispatch
def normalize_arrays(
    inputs: Any,
    is_shared: bool = False,
) -> Any:
    """Fallback normalizer: accept any object exposing the array interface.

    Shared mode converts without copying (then enforces C-contiguity);
    non-shared mode always returns a fresh copy.
    """
    if not hasattr(inputs, "__array__"):
        # No dispatcher matched and there is no array interface to fall back on.
        raise TypeError(f"Incompatible inputs of type: {type(inputs)}")
    if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
        # https://numpy.org/devdocs/numpy_2_0_migration_guide.html#adapting-to-changes-in-the-copy-keyword
        return to_c_style(np.asarray(inputs), is_shared) if is_shared else np.asarray(inputs, copy=True)  # type: ignore
    return to_c_style(np.array(inputs, copy=False), is_shared) if is_shared else np.array(inputs, copy=True)
176
+
177
+
178
@normalize_arrays.register(dict)
def _(
    inputs: dict,
    is_shared: bool = False,
) -> dict:
    """Shallow-copy a dict of inputs, making values C-contiguous when shared."""
    if is_shared:
        return {key: to_c_style(data, True) for key, data in inputs.items()}
    return dict(inputs)
184
+
185
+
186
@normalize_arrays.register(OVDict)
def _(
    inputs: OVDict,
    is_shared: bool = False,
) -> dict:
    """Re-key an OVDict by position, making values C-contiguous when shared."""
    normalized = {}
    for index, (_, data) in enumerate(inputs.items()):
        normalized[index] = to_c_style(data, is_shared) if is_shared else data
    return normalized
192
+
193
+
194
@normalize_arrays.register(list)
@normalize_arrays.register(tuple)
def _(
    inputs: Union[list, tuple],
    is_shared: bool = False,
) -> dict:
    """Re-key a sequence by position, making values C-contiguous when shared."""
    if is_shared:
        return {index: to_c_style(data, True) for index, data in enumerate(inputs)}
    return dict(enumerate(inputs))
201
+
202
+
203
@normalize_arrays.register(np.ndarray)
def _(
    inputs: dict,
    is_shared: bool = False,
) -> Any:
    """Pass the array through, enforcing C-contiguity only in shared mode."""
    if is_shared:
        return to_c_style(inputs, True)
    return inputs
209
+ ###
210
+ # End of array normalization.
211
+ ###
212
+
213
+
214
+ ###
215
+ # Start of "shared" dispatcher.
216
+ # (1) Each method should keep Tensors "as-is", regardless to them being shared or not.
217
+ # (2) ...
218
+ ###
219
+ # Step to keep alive input values that are not C-style by default
220
@singledispatch
def create_shared(
    inputs: Any,
    request: _InferRequestWrapper,
) -> None:
    """Create zero-copy Tensor(s) for array-interface inputs.

    Fallback dispatcher. The normalized data is stashed on the request
    (``_inputs_data``) to keep it alive for the duration of inference.

    :raises TypeError: when ``inputs`` matches no registered dispatcher
                       and has no array interface.
    """
    if not hasattr(inputs, "__array__"):
        # Error should be raised if type does not match any dispatchers
        raise TypeError(f"Incompatible inputs of type: {type(inputs)}")
    request._inputs_data = normalize_arrays(inputs, is_shared=True)
    return value_to_tensor(request._inputs_data, request=request, is_shared=True)
231
+
232
+
233
@create_shared.register(dict)
@create_shared.register(tuple)
@create_shared.register(OVDict)
def _(
    inputs: Union[dict, tuple, OVDict],
    request: _InferRequestWrapper,
) -> dict:
    """Create shared Tensors for every item of a container input."""
    normalized = normalize_arrays(inputs, is_shared=True)
    # Keep normalized data alive on the request for zero-copy inference.
    request._inputs_data = normalized
    tensors = {}
    for key, data in normalized.items():
        tensors[key] = value_to_tensor(data, request=request, is_shared=True, key=key)
    return tensors
242
+
243
+
244
# Special override to perform list-related dispatch
@create_shared.register(list)
def _(
    inputs: list,
    request: _InferRequestWrapper,
) -> dict:
    """Create shared Tensors for a list input.

    If list is passed to single input model and consists only of simple types
    i.e. str/bytes/float/int, wrap around it and pass into the dispatcher.
    """
    wrap_scalars = request._is_single_input() and is_list_simple_type(inputs)
    request._inputs_data = normalize_arrays(
        [inputs] if wrap_scalars else inputs,
        is_shared=True,
    )
    return {
        key: value_to_tensor(data, request=request, is_shared=True, key=key)
        for key, data in request._inputs_data.items()
    }
254
+
255
+
256
@create_shared.register(np.ndarray)
def _(
    inputs: np.ndarray,
    request: _InferRequestWrapper,
) -> Tensor:
    """Create a single shared Tensor from an ndarray input."""
    normalized = normalize_arrays(inputs, is_shared=True)
    # Stash data on the request so the zero-copy Tensor stays valid.
    request._inputs_data = normalized
    return value_to_tensor(normalized, request=request, is_shared=True)
263
+
264
+
265
@create_shared.register(Tensor)
@create_shared.register(np.number)
@create_shared.register(int)
@create_shared.register(float)
@create_shared.register(str)
@create_shared.register(bytes)
def _(
    inputs: Union[Tensor, ScalarTypes, str, bytes],
    request: _InferRequestWrapper,
) -> Tensor:
    """Pass Tensors and scalar-like values straight to tensor conversion.

    No normalization/keep-alive step is needed for these types.
    """
    tensor = value_to_tensor(inputs, request=request, is_shared=True)
    return tensor
276
+ ###
277
+ # End of "shared" dispatcher methods.
278
+ ###
279
+
280
+
281
+ ###
282
+ # Start of "copied" dispatcher.
283
+ ###
284
def set_request_tensor(
    request: _InferRequestWrapper,
    tensor: Tensor,
    key: Optional[ValidKeys] = None,
) -> None:
    """Attach ``tensor`` to the request under ``key``.

    ``None`` targets the single input, an ``int`` addresses an input by
    index, and ``str``/``ConstOutput`` address a tensor by name/port.

    :raises TypeError: for any other key type.
    """
    if key is None:
        request.set_input_tensor(tensor)
        return
    if isinstance(key, int):
        request.set_input_tensor(key, tensor)
        return
    if isinstance(key, (str, ConstOutput)):
        request.set_tensor(key, tensor)
        return
    raise TypeError(f"Unsupported key type: {type(key)} for Tensor under key: {key}")
297
+
298
+
299
@singledispatch
def update_tensor(
    inputs: Any,
    request: _InferRequestWrapper,
    key: Optional[ValidKeys] = None,
) -> None:
    """Copy input data into the request's pre-allocated tensor.

    Fallback dispatcher: converts any array-interface object via
    ``normalize_arrays`` and re-dispatches on the resulting ndarray.

    :raises TypeError: when ``inputs`` has no array interface.
    """
    if not hasattr(inputs, "__array__"):
        raise TypeError(f"Incompatible inputs of type: {type(inputs)} under {key} key!")
    update_tensor(normalize_arrays(inputs, is_shared=False), request, key)
    return None
309
+
310
+
311
@update_tensor.register(np.ndarray)
def _(
    inputs: np.ndarray,
    request: _InferRequestWrapper,
    key: Optional[ValidKeys] = None,
) -> None:
    """Copy an ndarray into the device tensor addressed by ``key``."""
    if inputs.ndim == 0:
        # If shape is "empty", assume this is a scalar value
        set_request_tensor(
            request,
            value_to_tensor(inputs, request=request, is_shared=False, key=key),
            key,
        )
        return
    tensor = get_request_tensor(request, key)
    # Update shape if there is a mismatch (must happen before copying).
    if tuple(tensor.shape) != inputs.shape:
        tensor.shape = inputs.shape
    if tensor.element_type == Type.string:
        tensor.bytes_data = inputs
    else:
        # When copying, type should be up/down-casted automatically.
        tensor.data[:] = inputs[:]
334
+
335
+
336
@update_tensor.register(np.number)  # type: ignore
@update_tensor.register(float)
@update_tensor.register(int)
@update_tensor.register(str)
def _(
    inputs: Union[ScalarTypes, str],
    request: _InferRequestWrapper,
    key: Optional[ValidKeys] = None,
) -> None:
    """Wrap a scalar value in a copied Tensor and attach it under ``key``."""
    scalar_tensor = value_to_tensor(inputs, request=request, is_shared=False, key=key)
    set_request_tensor(request, scalar_tensor, key)
350
+
351
+
352
def update_inputs(inputs: dict, request: _InferRequestWrapper) -> dict:
    """Helper function to prepare inputs for inference.

    It creates copy of Tensors or copy data to already allocated Tensors on device
    if the item is of type `np.ndarray`, `np.number`, `int`, `float` or has numpy __array__ attribute.
    If value is of type `list`, create a Tensor based on it, copy will occur in the Tensor constructor.
    """
    # Temporary dict used to transfer data to inference calls, ensuring
    # that the caller's original inputs are not overwritten with Tensors.
    new_inputs: Dict[ValidKeys, Tensor] = {}
    for key, value in inputs.items():
        if not isinstance(key, (str, int, ConstOutput)):
            raise TypeError(f"Incompatible key type for input: {key}")
        # NOTE: the array-like check must come first — keep original order.
        if isinstance(value, (np.ndarray, np.number, int, float, str)) or hasattr(value, "__array__"):
            # Copy data into the already allocated device Tensor.
            update_tensor(value, request, key)
            continue
        if isinstance(value, list):
            # Copy occurs inside the Tensor constructor.
            new_inputs[key] = Tensor(value)
            continue
        if isinstance(value, Tensor):
            # Tensors are forwarded via the temporary dictionary.
            new_inputs[key] = value
            continue
        raise TypeError(f"Incompatible inputs of type: {type(value)} under {key} key!")
    return new_inputs
379
+
380
+
381
@singledispatch
def create_copied(
    inputs: Union[ContainerTypes, np.ndarray, ScalarTypes, str, bytes],
    request: _InferRequestWrapper,
) -> Union[dict, None]:
    """Copy input data into the request's pre-allocated tensors.

    Fallback dispatcher for array-interface objects; returns an empty
    dict since the data is written directly into the request.

    :raises TypeError: when ``inputs`` matches no registered dispatcher
                       and has no array interface.
    """
    if not hasattr(inputs, "__array__"):
        # Error should be raised if type does not match any dispatchers
        raise TypeError(f"Incompatible inputs of type: {type(inputs)}")
    update_tensor(normalize_arrays(inputs, is_shared=False), request, key=None)
    return {}
392
+
393
+
394
@create_copied.register(dict)
@create_copied.register(tuple)
@create_copied.register(OVDict)
def _(
    inputs: Union[dict, tuple, OVDict],
    request: _InferRequestWrapper,
) -> dict:
    """Copy every item of a container input into the request."""
    normalized = normalize_arrays(inputs, is_shared=False)
    return update_inputs(normalized, request)
402
+
403
+
404
+ # Special override to perform list-related dispatch
405
@create_copied.register(list)
def _(
    inputs: list,
    request: _InferRequestWrapper,
) -> dict:
    """Copy a list input into the request.

    If list is passed to single input model and consists only of simple types
    i.e. str/bytes/float/int, wrap around it and pass into the dispatcher.
    """
    wrap_scalars = request._is_single_input() and is_list_simple_type(inputs)
    normalized = normalize_arrays(
        [inputs] if wrap_scalars else inputs,
        is_shared=False,
    )
    return update_inputs(normalized, request)
413
+
414
+
415
@create_copied.register(np.ndarray)
def _(
    inputs: np.ndarray,
    request: _InferRequestWrapper,
) -> dict:
    """Copy a single ndarray into the request's input tensor."""
    normalized = normalize_arrays(inputs, is_shared=False)
    update_tensor(normalized, request, key=None)
    # Data was written directly into the request; nothing to forward.
    return {}
422
+
423
+
424
@create_copied.register(Tensor)
@create_copied.register(np.number)
@create_copied.register(int)
@create_copied.register(float)
@create_copied.register(str)
@create_copied.register(bytes)
def _(
    inputs: Union[Tensor, ScalarTypes, str, bytes],
    request: _InferRequestWrapper,
) -> Tensor:
    """Convert Tensors and scalar-like values into a copied Tensor."""
    copied = value_to_tensor(inputs, request=request, is_shared=False)
    return copied
435
+ ###
436
+ # End of "copied" dispatcher methods.
437
+ ###
438
+
439
+
440
def _data_dispatch(
    request: _InferRequestWrapper,
    inputs: Union[ContainerTypes, Tensor, np.ndarray, ScalarTypes, str] = None,
    is_shared: bool = False,
) -> Union[dict, Tensor]:
    """Entry point: route inputs to the shared or copied dispatcher.

    ``None`` inputs yield an empty dict (nothing to set on the request).
    """
    if inputs is None:
        return {}
    dispatcher = create_shared if is_shared else create_copied
    return dispatcher(inputs, request)
@@ -0,0 +1,148 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (C) 2018-2024 Intel Corporation
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ import numpy as np
6
+
7
+ from functools import singledispatchmethod
8
+ from collections.abc import Mapping
9
+ from typing import Dict, Set, Tuple, Union, Iterator, Optional
10
+ from typing import KeysView, ItemsView, ValuesView
11
+
12
+ from openvino._pyopenvino import Tensor, ConstOutput
13
+ from openvino._pyopenvino import InferRequest as InferRequestBase
14
+
15
+
16
def tensor_from_file(path: str) -> Tensor:
    """Create Tensor from file. Data will be read with dtype of uint8."""
    # Fixed docstring typo: "unit8" -> "uint8".
    return Tensor(np.fromfile(path, dtype=np.uint8))  # type: ignore
19
+
20
+
21
class _InferRequestWrapper(InferRequestBase):
    """InferRequest class with internal memory."""

    def __init__(self, other: InferRequestBase) -> None:
        # Private member to store newly created shared memory data,
        # keeping zero-copy inputs alive for the lifetime of the request.
        self._inputs_data = None
        super().__init__(other)

    def _is_single_input(self) -> bool:
        # True when the underlying model exposes exactly one input tensor.
        return len(self.input_tensors) == 1
31
+
32
+
33
class OVDict(Mapping):
    """Custom OpenVINO dictionary with inference results.

    This class is a dict-like object. It provides possibility to
    address data tensors with three key types:

    * `openvino.runtime.ConstOutput` - port of the output
    * `int` - index of the output
    * `str` - names of the output

    This class follows `frozenset`/`tuple` concept of immutability.
    It is prohibited to assign new items or edit them.

    To revert to the previous behavior use `to_dict` method which
    returns the underlying dictionary.
    Note: It removes addressing feature! New dictionary keeps
    only `ConstOutput` keys.

    If a tuple return value is needed, use the `to_tuple` method which
    converts values to a tuple.

    :Example:

    .. code-block:: python

        # Reverts to the previous behavior of the native dict
        result = request.infer(inputs).to_dict()
        # or alternatively:
        result = dict(request.infer(inputs))

    .. code-block:: python

        # To dispatch outputs of multi-output inference:
        out1, out2, out3, _ = request.infer(inputs).values()
        # or alternatively:
        out1, out2, out3, _ = request.infer(inputs).to_tuple()
    """
    def __init__(self, _dict: Dict[ConstOutput, np.ndarray]) -> None:
        self._dict = _dict
        # Lazily built ConstOutput -> names mapping for str-key lookups.
        self._names: Optional[Dict[ConstOutput, Set[str]]] = None

    def __iter__(self) -> Iterator:
        return self._dict.__iter__()

    def __len__(self) -> int:
        return len(self._dict)

    def __repr__(self) -> str:
        return self._dict.__repr__()

    def __get_names(self) -> Dict[ConstOutput, Set[str]]:
        """Return names of every output key.

        Insert empty set if key has no name.
        """
        return {key: key.get_names() for key in self._dict.keys()}

    def __get_key(self, index: int) -> ConstOutput:
        # Translate a positional index into the ConstOutput key at that
        # position; raises IndexError when out of range.
        return list(self._dict.keys())[index]

    @singledispatchmethod
    def __getitem_impl(self, key: Union[ConstOutput, int, str]) -> np.ndarray:
        # Fallback for unsupported key types.
        raise TypeError(f"Unknown key type: {type(key)}")

    @__getitem_impl.register
    def _(self, key: ConstOutput) -> np.ndarray:
        # Direct port lookup.
        return self._dict[key]

    @__getitem_impl.register
    def _(self, key: int) -> np.ndarray:
        try:
            return self._dict[self.__get_key(key)]
        except IndexError:
            # Mapping semantics: an out-of-range index surfaces as KeyError.
            raise KeyError(key)

    @__getitem_impl.register
    def _(self, key: str) -> np.ndarray:
        # Resolve the name against each port's name set (built lazily).
        if self._names is None:
            self._names = self.__get_names()
        for port, port_names in self._names.items():
            if key in port_names:
                return self._dict[port]
        raise KeyError(key)

    def __getitem__(self, key: Union[ConstOutput, int, str]) -> np.ndarray:
        return self.__getitem_impl(key)

    def keys(self) -> KeysView[ConstOutput]:
        return self._dict.keys()

    def values(self) -> ValuesView[np.ndarray]:
        return self._dict.values()

    def items(self) -> ItemsView[ConstOutput, np.ndarray]:
        return self._dict.items()

    def names(self) -> Tuple[Set[str], ...]:
        """Return names of every output key.

        Insert empty set if key has no name.
        """
        if self._names is None:
            self._names = self.__get_names()
        return tuple(self._names.values())

    def to_dict(self) -> Dict[ConstOutput, np.ndarray]:
        """Return the underlying native dictionary.

        The dictionary is returned as-is (no copy), thus any
        modifications to returned values may affect this class as well.
        """
        return self._dict

    def to_tuple(self) -> tuple:
        """Convert values of this dictionary to a tuple."""
        return tuple(self._dict.values())