bigdl-core-npu 2.6.0b20250114__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (234) hide show
  1. bigdl-core-npu/__init__.py +0 -0
  2. bigdl-core-npu/include/common.h +96 -0
  3. bigdl-core-npu/include/npu_llm.h +74 -0
  4. bigdl-core-npu/npu_llm.dll +0 -0
  5. bigdl-core-npu/npu_llm.lib +0 -0
  6. bigdl_core_npu-2.6.0b20250114.dist-info/METADATA +44 -0
  7. bigdl_core_npu-2.6.0b20250114.dist-info/RECORD +234 -0
  8. bigdl_core_npu-2.6.0b20250114.dist-info/WHEEL +5 -0
  9. bigdl_core_npu-2.6.0b20250114.dist-info/top_level.txt +2 -0
  10. intel_npu_acceleration_library/__init__.py +24 -0
  11. intel_npu_acceleration_library/_version.py +6 -0
  12. intel_npu_acceleration_library/backend/__init__.py +37 -0
  13. intel_npu_acceleration_library/backend/base.py +250 -0
  14. intel_npu_acceleration_library/backend/bindings.py +383 -0
  15. intel_npu_acceleration_library/backend/compression.py +24 -0
  16. intel_npu_acceleration_library/backend/convolution.py +58 -0
  17. intel_npu_acceleration_library/backend/factory.py +1161 -0
  18. intel_npu_acceleration_library/backend/linear.py +60 -0
  19. intel_npu_acceleration_library/backend/matmul.py +59 -0
  20. intel_npu_acceleration_library/backend/mlp.py +58 -0
  21. intel_npu_acceleration_library/backend/ops.py +142 -0
  22. intel_npu_acceleration_library/backend/qlinear.py +75 -0
  23. intel_npu_acceleration_library/backend/qmatmul.py +66 -0
  24. intel_npu_acceleration_library/backend/runtime.py +215 -0
  25. intel_npu_acceleration_library/backend/sdpa.py +107 -0
  26. intel_npu_acceleration_library/backend/tensor.py +1120 -0
  27. intel_npu_acceleration_library/backend/utils.py +70 -0
  28. intel_npu_acceleration_library/compiler.py +194 -0
  29. intel_npu_acceleration_library/device.py +230 -0
  30. intel_npu_acceleration_library/dtypes.py +155 -0
  31. intel_npu_acceleration_library/external/openvino/__init__.py +72 -0
  32. intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +21 -0
  33. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
  34. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
  35. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
  36. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
  37. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
  38. intel_npu_acceleration_library/external/openvino/experimental/__init__.py +14 -0
  39. intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
  40. intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
  41. intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
  42. intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +293 -0
  43. intel_npu_acceleration_library/external/openvino/frontend/jax/passes.py +65 -0
  44. intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +182 -0
  45. intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
  46. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
  47. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
  48. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
  49. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
  50. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
  51. intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
  52. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
  53. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
  54. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
  55. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
  56. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
  57. intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
  58. intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +370 -0
  59. intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +180 -0
  60. intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
  61. intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +118 -0
  62. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
  63. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
  64. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
  65. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
  66. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
  67. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +131 -0
  68. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
  69. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
  70. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
  71. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
  72. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +290 -0
  73. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +126 -0
  74. intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +568 -0
  75. intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +258 -0
  76. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
  77. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
  78. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
  79. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
  80. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
  81. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
  82. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
  83. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
  84. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +481 -0
  85. intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
  86. intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
  87. intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
  88. intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +28 -0
  89. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
  90. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
  91. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +5 -0
  92. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
  93. intel_npu_acceleration_library/external/openvino/properties/__init__.py +22 -0
  94. intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
  95. intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
  96. intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
  97. intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
  98. intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
  99. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
  100. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
  101. intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
  102. intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
  103. intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
  104. intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
  105. intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
  106. intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +19 -0
  107. intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
  108. intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
  109. intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3068 -0
  110. intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
  111. intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
  112. intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
  113. intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
  114. intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
  115. intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
  116. intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
  117. intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +398 -0
  118. intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
  119. intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
  120. intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +17 -0
  121. intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +276 -0
  122. intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
  123. intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
  124. intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
  125. intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
  126. intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
  127. intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
  128. intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
  129. intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
  130. intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
  131. intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +215 -0
  132. intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
  133. intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
  134. intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
  135. intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +787 -0
  136. intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
  137. intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
  138. intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
  139. intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
  140. intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
  141. intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
  142. intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +40 -0
  143. intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
  144. intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
  145. intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
  146. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
  147. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +447 -0
  148. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
  149. intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +156 -0
  150. intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
  151. intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
  152. intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
  153. intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
  154. intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
  155. intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
  156. intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
  157. intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
  158. intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
  159. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
  160. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
  161. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
  162. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
  163. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
  164. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
  165. intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
  166. intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
  167. intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
  168. intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
  169. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
  170. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +550 -0
  171. intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
  172. intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
  173. intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
  174. intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
  175. intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
  176. intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +40 -0
  177. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
  178. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
  179. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
  180. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
  181. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
  182. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
  183. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
  184. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
  185. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
  186. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +298 -0
  187. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
  188. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +214 -0
  189. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
  190. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
  191. intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
  192. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
  193. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
  194. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
  195. intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +196 -0
  196. intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
  197. intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
  198. intel_npu_acceleration_library/external/openvino/utils.py +115 -0
  199. intel_npu_acceleration_library/functional/__init__.py +8 -0
  200. intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
  201. intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
  202. intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
  203. intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
  204. intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
  205. intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
  206. intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
  207. intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
  208. intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
  209. intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
  210. intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
  211. intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
  212. intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
  213. intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
  214. intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
  215. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
  216. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
  217. intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
  218. intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
  219. intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
  220. intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
  221. intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
  222. intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
  223. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
  224. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
  225. intel_npu_acceleration_library/modelling.py +150 -0
  226. intel_npu_acceleration_library/nn/__init__.py +20 -0
  227. intel_npu_acceleration_library/nn/autograd.py +68 -0
  228. intel_npu_acceleration_library/nn/conv.py +257 -0
  229. intel_npu_acceleration_library/nn/functional.py +1207 -0
  230. intel_npu_acceleration_library/nn/linear.py +162 -0
  231. intel_npu_acceleration_library/nn/llm.py +417 -0
  232. intel_npu_acceleration_library/nn/module.py +393 -0
  233. intel_npu_acceleration_library/optimizations.py +157 -0
  234. intel_npu_acceleration_library/quantization.py +174 -0
@@ -0,0 +1,481 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # flake8: noqa
5
+ # mypy: ignore-errors
6
+
7
+
8
+ import logging as log
9
+ import numpy as np
10
+ import sys
11
+ from openvino.runtime import PartialShape, Dimension, Type
12
+ from packaging.version import parse, Version
13
+ from typing import List, Dict, Union
14
+
15
+
16
+ # TODO: reuse this method in ovc and remove duplication
17
def get_static_shape(shape: "Union[PartialShape, list, tuple]", dynamic_value=None):
    """Return a tuple of static dimensions derived from *shape*.

    For each dynamic dimension the lower boundary is used if it is set,
    otherwise the upper boundary if it is set; a fully dynamic dimension
    is replaced with *dynamic_value*.

    :param shape: shape to convert; items may be int, np.int64,
        (min_length, max_length) tuples (the MO cli parser format) or
        openvino Dimension objects.
    :param dynamic_value: substitute used for fully dynamic dimensions.
    :return: tuple with one static value per dimension.
    :raises Exception: if a dimension has an unsupported type.
    """
    # NOTE: the annotation is a string so it is not evaluated eagerly;
    # the original list-literal form was not a valid PEP 484 annotation.
    shape_list = []
    for dim in shape:
        if isinstance(dim, (int, np.int64)):
            # -1 marks a fully dynamic dimension
            shape_list.append(dynamic_value if dim == -1 else dim)
        elif isinstance(dim, tuple):
            # tuple where (min_length, max_length), the format which uses MO cli parser
            assert len(dim) == 2, "Unknown dimension type {}".format(dim)
            if dim[0] > 0:
                shape_list.append(dim[0])
            elif dim[1] < np.iinfo(np.int64).max:
                shape_list.append(dim[1])
            else:
                # both boundaries unset -> fully dynamic
                shape_list.append(dynamic_value)
        elif isinstance(dim, Dimension):
            if dim.is_static or dim.get_min_length() > 0:
                shape_list.append(dim.get_min_length())
            elif dim.get_max_length() != -1:
                shape_list.append(dim.get_max_length())
            else:
                shape_list.append(dynamic_value)
        else:
            raise Exception("Unknown dimension type {}".format(dim))

    return tuple(shape_list)
55
+
56
+
57
def get_imported_module_version(imported_module):
    """
    Get imported module version
    :return: version(str) or raise AttributeError exception
    """
    # Probe the conventional version attributes in priority order and
    # accept the first one that is actually a string.
    for attr_name in ("__version__", "VERSION", "version"):
        candidate = getattr(imported_module, attr_name, None)
        if isinstance(candidate, str):
            return candidate
    raise AttributeError("{} module doesn't have version attribute".format(imported_module))
75
+
76
+
77
+ # TODO: reuse this method in ovc and remove duplication
78
# TODO: reuse this method in ovc and remove duplication
def get_environment_setup(framework):
    """
    Get environment setup such as Python version, TensorFlow version
    :param framework: framework name
    :return: a dictionary of environment variables
    """
    env_setup = dict()
    env_setup['python_version'] = "{}.{}.{}".format(sys.version_info.major,
                                                    sys.version_info.minor,
                                                    sys.version_info.micro)
    try:
        if framework == 'tf':
            # importlib.import_module instead of exec("import tensorflow"):
            # same effect, but statically analyzable and not an exec() smell.
            import importlib
            tf_module = importlib.import_module("tensorflow")
            env_setup['tensorflow'] = get_imported_module_version(tf_module)
    except (AttributeError, ImportError):
        # TF absent or without a version attribute: report nothing for it
        pass
    env_setup['sys_platform'] = sys.platform
    return env_setup
98
+
99
+
100
def trace_tf_model_if_needed(input_model, placeholder_shapes, placeholder_data_types, example_input):
    """Trace Keras/TF-module models into a concrete function; pass other models through."""
    import tensorflow as tf
    traceable_types = (tf.keras.layers.Layer,
                       tf.Module,
                       tf.keras.Model,
                       tf.types.experimental.GenericFunction)
    if isinstance(input_model, traceable_types):
        return trace_tf_model(input_model, placeholder_shapes, placeholder_data_types, example_input)
    return input_model
106
+
107
+
108
def partial_shape_to_list(partial_shape: PartialShape):
    """Convert a PartialShape to a list, using None for dynamic dimensions.

    Returns None when the rank itself is dynamic.
    """
    if partial_shape.rank.is_dynamic:
        return None
    return [dim.get_length() if dim.is_static else None for dim in partial_shape]
118
+
119
+
120
def get_input_spec_from_model(model, input_shapes=None):
    """Build the tf.TensorSpec list used for tracing.

    Prefers the model's recorded build shape; otherwise derives specs from
    ``input_shapes``; falls back to a single unconstrained spec.
    """
    import tensorflow as tf
    build_shape = getattr(model, "_build_input_shape", None)
    if build_shape is not None:
        if isinstance(build_shape, list):
            # multi-input model: keep the nested-list structure
            return [[tf.TensorSpec(shape) for shape in build_shape]]
        return [tf.TensorSpec(build_shape)]
    if input_shapes and isinstance(input_shapes, list) and len(input_shapes) > 0:
        specs = []
        for candidate in input_shapes:
            if isinstance(candidate, PartialShape):
                specs.append(tf.TensorSpec(partial_shape_to_list(candidate)))
            else:
                # unknown entry type: leave the spec unconstrained
                specs.append(tf.TensorSpec(None))
        return specs
    return [tf.TensorSpec(None)]
137
+
138
+
139
def get_concrete_func(tf_function, example_input, input_needs_packing, error_message, use_example_input=True):
    """
    Runs tracing of TF function and returns a concrete function.

    :param tf_function: TF function that needs to be traced.
    :param example_input: Example of function input.
    :param input_needs_packing: determines if input needs to be packed in a list before passing to TF function.
    It is used when original function was wrapped in outer TF function, which changes function signature.
    In this case wrapper TF function always expects list of inputs which are unpacked inside subfunction.
    So list/tuple are treated as multiple inputs of original model.
    Non list/tuple are treated as single input, and it needs packing to a list,
    as wrapper function always expect list of inputs.
    :param error_message: Error message which should be shown in case of tracing error.
    :param use_example_input: Determines if example_input should be used.

    :returns: Object of type tf.types.experimental.ConcreteFunction.
    """
    if input_needs_packing and not isinstance(example_input, (list, tuple)):
        example_input = [example_input]
    try:
        if use_example_input:
            if not input_needs_packing and isinstance(example_input, (list, tuple)):
                # multiple inputs of the original model: spread them
                concrete_func = tf_function.get_concrete_function(*example_input)
            else:
                concrete_func = tf_function.get_concrete_function(example_input)
        else:
            # rely on the function's own input_signature
            concrete_func = tf_function.get_concrete_function()
    except Exception as e:
        # chain the cause so the original tracing traceback is preserved
        raise Exception(error_message.format(e)) from e
    return concrete_func
170
+
171
+
172
def get_signature_from_input(keras_model):
    """Return the model's ``input`` attribute, or None when missing or unset."""
    return getattr(keras_model, 'input', None)
176
+
177
+
178
def get_signature_from_input_signature(keras_model):
    """Return the model's ``input_signature`` attribute, or None when missing or unset."""
    return getattr(keras_model, 'input_signature', None)
182
+
183
+
184
def create_generic_function_from_keras_model(keras_model):
    """Wrap a Keras model / tf.Module into a tf.function with an explicit input signature.

    Returns the wrapper tf.function, or None when no input signature can be
    derived from the model.
    """
    import tensorflow as tf
    assert isinstance(keras_model, (tf.keras.Model, tf.Module)), \
        "[TensorFlow Frontend] internal error: the input model must be of tf.keras.Model or tf.Module model type"

    # prefer the signature derived from model.input; fall back to input_signature
    keras_input_signature = get_signature_from_input(keras_model)
    if keras_input_signature is None:
        keras_input_signature = get_signature_from_input_signature(keras_model)
    if keras_input_signature is None:
        return None
    tf_input_signature = None
    wrapper_function = None
    if isinstance(keras_input_signature, dict):
        # dict signature: tensor names are taken from the dictionary keys
        tf_input_signature = []
        for tensor_name, tensor_spec in keras_input_signature.items():
            tf_input_signature.append(tf.TensorSpec(shape=tensor_spec.shape,
                                                    dtype=tensor_spec.dtype,
                                                    name=tensor_name))
    elif isinstance(keras_input_signature, list):
        # list signature: each spec carries its own name
        tf_input_signature = []
        for tensor_spec in keras_input_signature:
            tf_input_signature.append(tf.TensorSpec(shape=tensor_spec.shape,
                                                    dtype=tensor_spec.dtype,
                                                    name=tensor_spec.name))
    else:
        try:
            # single KerasTensor case
            tf_input_signature = []
            tf_input_signature.append(tf.TensorSpec(shape=keras_input_signature.shape,
                                                    dtype=keras_input_signature.dtype,
                                                    name=keras_input_signature.name))
        except:
            # object without shape/dtype/name attributes: no signature available
            tf_input_signature = None
    if tf_input_signature is not None:
        @tf.function(input_signature=tf_input_signature)
        def wrapper_function_dict(*args):
            if isinstance(keras_input_signature, list):
                outputs = keras_model(args)
            else:
                # map positional args back to named inputs (dict / single-tensor case)
                input_dict = {}
                for ind, tensor_spec in enumerate(tf_input_signature):
                    input_dict[tensor_spec.name] = args[ind]
                outputs = keras_model(input_dict)
            # need to wrap the output into dictionary
            # it helps to preserve original keras tensor names
            post_outputs = {}
            if isinstance(outputs, dict):
                for output_name, output_value in outputs.items():
                    post_outputs[output_name] = output_value
            else:
                try:
                    if isinstance(outputs, list) and isinstance(keras_model.outputs, list) and \
                            len(outputs) == len(keras_model.outputs):
                        for output_value, output_tensor in zip(outputs, keras_model.outputs):
                            post_outputs[output_tensor.name] = output_value
                    else:
                        post_outputs[keras_model.output.name] = outputs
                except:
                    # NOTE(review): bare except keeps tracing alive when output
                    # names are unavailable; outputs are then returned unnamed
                    post_outputs = outputs
            return post_outputs

        wrapper_function = wrapper_function_dict
    return wrapper_function
247
+
248
+
249
def trace_tf_model(model, input_shapes, input_types, example_input):
    """Trace *model* into a TF concrete function for the frontend.

    Picks the tf.function to trace based on the model type, then traces it
    with ``example_input`` when given, else with the function's own input
    signature, else with an input spec derived from the model/``input_shapes``.
    """
    import tensorflow as tf
    if isinstance(model.__call__, tf.types.experimental.GenericFunction):
        # object whose __call__ is already a tf.function
        tf_function = model.__call__
        input_needs_packing = False
    elif isinstance(model, tf.types.experimental.GenericFunction):
        tf_function = model
        input_needs_packing = False
    elif isinstance(model, (tf.keras.Model, tf.Module)):
        tf_function = create_generic_function_from_keras_model(model)
        if tf_function is not None:
            input_needs_packing = False
        else:
            # Wrap model to tf.Function.
            # In this case we loose input/output tensor names.
            @tf.function
            def tf_function(args):
                return model(*args)

            input_needs_packing = True
    else:
        # Wrap model to tf.Function.
        # In this case we loose input/output tensor names.
        @tf.function
        def tf_function(args):
            return model(*args)

        input_needs_packing = True

    # NOTE(review): this helper is never called below, and its comprehensions
    # iterate the outer `input_shapes` instead of the `shape` argument
    # (the loop variable also shadows `shape`) — looks like dead code;
    # left untouched here.
    def are_shapes_defined(shape: Union[List, Dict]):
        if shape is None:
            return False
        assert hasattr(shape, '__len__')
        if len(shape) == 0:
            return False

        if isinstance(shape, list):
            return np.all([shape is not None for shape in input_shapes])
        elif isinstance(shape, dict):
            return np.all([shape is not None for name, shape in input_shapes.items()])

    if example_input is not None:
        concrete_func = get_concrete_func(tf_function, example_input, input_needs_packing,
                                          "Could not trace the TF model with the following error: {}")
    else:
        if isinstance(tf_function, tf.types.experimental.GenericFunction) and \
                tf_function.input_signature is not None:
            # the function carries its own signature: trace without example input
            concrete_func = get_concrete_func(tf_function, None, input_needs_packing,
                                              "Could not trace the TF model with the following error: {}",
                                              use_example_input=False)
        else:
            input_spec = get_input_spec_from_model(model, input_shapes)
            concrete_func = get_concrete_func(tf_function, input_spec, input_needs_packing,
                                              "Could not trace the TF model with the following error: {}.\n"
                                              "Please provide 'example_input'.")

    return concrete_func
306
+
307
+
308
def type_supported_by_tf_fe(input_model):
    """Return True if the TF frontend can handle ``input_model``'s type."""
    import tensorflow as tf
    # Model types that must be traced into a concrete function first.
    traceable_types = (tf.keras.layers.Layer, tf.Module, tf.keras.Model,
                       tf.types.experimental.GenericFunction)
    # Model types that can be consumed directly without tracing.
    direct_types = (tf.Graph, tf.types.experimental.ConcreteFunction)
    if isinstance(input_model, traceable_types):
        return True
    if isinstance(input_model, direct_types):
        return True
    # An already-wrapped GraphIterator is accepted as-is.
    return model_is_graph_iterator(input_model)
321
+
322
+
323
def is_variable(func_input, captures):
    """Return True if ``func_input`` is a resource tensor or a captured tensor.

    Such tensors back variables/constants of the function and are not real
    model inputs.
    """
    import tensorflow as tf
    # Resource dtype marks tensors backing tf.Variable objects.
    if func_input.dtype == tf.resource:
        return True
    # Object identity against each capture's inner tensor (capture[1]).
    return any(func_input is capture[1] for capture in captures)
331
+
332
+
333
def create_tf_graph_iterator(input_model, placeholder_shapes, placeholder_data_types, example_input, share_weights):
    """Wrap ``input_model`` into a ``GraphIteratorTFGraph`` for the TF frontend.

    Traces the model if needed, then - for a ConcreteFunction - builds maps
    from internal (graph) tensor names to external (user-facing) names for
    both inputs and outputs before wrapping its graph.

    :raises Exception: if the model type cannot be wrapped.
    """
    # May trace Keras models / tf.functions into a ConcreteFunction.
    input_model = trace_tf_model_if_needed(input_model, placeholder_shapes, placeholder_data_types, example_input)

    import tensorflow as tf
    from openvino.frontend.tensorflow.graph_iterator import GraphIteratorTFGraph
    if model_is_graph_iterator(input_model):
        # Already wrapped - return unchanged.
        return input_model
    if isinstance(input_model, tf.Graph):
        return GraphIteratorTFGraph(input_model, share_weights)
    elif isinstance(input_model, tf.types.experimental.ConcreteFunction):
        # create a map for inputs to map internal tensor name to external one
        # collect all internal tensor names in a given order
        input_names_map = None
        if hasattr(input_model, 'inputs') and hasattr(input_model, 'structured_input_signature'):
            internal_tensor_names = []
            for func_input in input_model.inputs:
                # Skip variables / captured tensors - they are not model inputs.
                if is_variable(func_input, input_model.graph.captures):
                    continue
                internal_tensor_names.append(func_input.name)
            if len(input_model.structured_input_signature) > 0 and \
                    len(internal_tensor_names) == len(input_model.structured_input_signature[0]):
                # Positional signature: pair internal names with TensorSpec names.
                for internal_name, tensor_spec in zip(internal_tensor_names, input_model.structured_input_signature[0]):
                    input_names_map = input_names_map or {}
                    if not isinstance(tensor_spec, tf.TensorSpec):
                        # A non-TensorSpec entry makes the mapping unreliable - drop it.
                        input_names_map = None
                        break
                    input_names_map[internal_name] = tensor_spec.name
            elif len(input_model.structured_input_signature) > 1 and \
                    len(internal_tensor_names) == len(input_model.structured_input_signature[1]):
                # Keyword signature: external names are the sorted dict keys.
                external_tensor_names = sorted(input_model.structured_input_signature[1].keys())
                for internal_name, external_name in zip(internal_tensor_names, external_tensor_names):
                    input_names_map = input_names_map or {}
                    input_names_map[internal_name] = external_name

        output_names_map = None
        if hasattr(input_model, 'outputs') and hasattr(input_model, 'structured_outputs') and \
                isinstance(input_model.structured_outputs, dict):
            external_names = sorted(list(input_model.structured_outputs.keys()))
            internal_names = [tensor.name for tensor in input_model.outputs]
            if len(external_names) == len(internal_names):
                # One-to-one: pair sorted external names with outputs in order.
                for external_name, internal_name in zip(external_names, internal_names):
                    output_names_map = output_names_map or {}
                    output_names_map[internal_name] = external_name
            else:
                # Counts differ: resolve each structured output to a tf.Tensor
                # (or the first tensor of a list) individually.
                for external_name, internal_tensor in input_model.structured_outputs.items():
                    internal_tf_tensor = None
                    if isinstance(internal_tensor, tf.Tensor):
                        internal_tf_tensor = internal_tensor
                    if isinstance(internal_tensor, list) and len(internal_tensor) > 0 and \
                            isinstance(internal_tensor[0], tf.Tensor):
                        internal_tf_tensor = internal_tensor[0]
                    if internal_tf_tensor is None:
                        # Unresolvable output: abandon the whole output map.
                        output_names_map = None
                        break
                    output_names_map = output_names_map or {}
                    output_names_map[internal_tf_tensor.name] = external_name
        return GraphIteratorTFGraph(input_model.graph, share_weights, False, input_names_map, output_names_map)
    raise Exception("Could not wrap model of type {} to GraphIteratorTFGraph.".format(type(input_model)))
391
+
392
+
393
def extract_model_graph(argv):
    """Normalize ``argv["input_model"]`` to a representation the TF frontend accepts.

    May replace ``argv["input_model"]`` in place (e.g. GraphDef -> Graph,
    Session -> its graph, Checkpoint -> root Keras model, Trackable -> a
    signature function).

    :param argv: dict holding the model under key ``"input_model"``.
    :return: True if the model is (or was converted to) a supported type,
        False otherwise.
    :raises Exception: for unknown checkpoint formats or Trackables without
        any usable signature/graph.
    """
    model = argv["input_model"]
    import tensorflow as tf
    trackable_is_imported = False
    try:
        # Trackable moved between TF versions; try the older location first.
        from tensorflow.python.training.tracking.base import Trackable # pylint: disable=no-name-in-module,import-error
        trackable_is_imported = True
    except:
        try:
            # Newer TF location.
            from tensorflow.python.trackable.base import Trackable
            trackable_is_imported = True
        except:
            log.warning("Could not import tensorflow.python.training.tracking.base.Trackable type.")
    env_setup = get_environment_setup("tf")
    if isinstance(model, tf.Graph):
        return True
    if isinstance(model, tf.compat.v1.GraphDef):
        # Import the GraphDef into a fresh Graph so downstream code sees a Graph.
        graph = tf.Graph()
        with graph.as_default():
            tf.graph_util.import_graph_def(model, name='')
        argv["input_model"] = graph
        return True
    if isinstance(model, tf.compat.v1.Session):
        argv["input_model"] = model.graph
        return True
    # Generic/Concrete functions are only supported with TF >= 2.6.0.
    if Version(env_setup["tensorflow"]) >= parse("2.6.0") and isinstance(model, (tf.types.experimental.GenericFunction,
                                                                                tf.types.experimental.ConcreteFunction)):
        return True
    if isinstance(model, tf.train.Checkpoint):
        if isinstance(model.root, tf.keras.Model):
            argv["input_model"] = model.root
            return True
        else:
            raise Exception("Unknown checkpoint format.")

    if isinstance(model, (tf.keras.layers.Layer, tf.Module, tf.keras.Model)):
        return True
    if trackable_is_imported and isinstance(model, Trackable):
        # SavedModel-like object: pick the best available signature.
        if hasattr(model, "signatures") and len(model.signatures.items()):
            if "serving_default" in model.signatures:
                argv["input_model"] = model.signatures["serving_default"]
            elif "default" in model.signatures:
                argv["input_model"] = model.signatures["default"]
            else:
                # No well-known signature: take the first one and warn.
                for signature_name, signature in model.signatures.items():
                    argv["input_model"] = model.signatures[signature_name]
                    log.warning("Could not find the default signature. "
                                "The following signature was used for conversion: {}".format(signature_name))
                    break

        elif hasattr(model, "graph"):
            argv["input_model"] = model.graph
        else:
            raise Exception("Could not find signature of graph in a Trackable object.")
        return True
    if model_is_graph_iterator(model):
        return True
    return False
451
+
452
+
453
def model_is_graph_iterator(model):
    """Return True if ``model`` is an OpenVINO TF ``GraphIteratorTFGraph``.

    Returns False when the OpenVINO TensorFlow frontend is unavailable.
    """
    try:
        from openvino.frontend.tensorflow.graph_iterator import GraphIteratorTFGraph
    # Narrowed from a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit; only a failed import means "unavailable".
    except ImportError:
        return False
    return isinstance(model, GraphIteratorTFGraph)
459
+
460
+
461
def tf_type_to_ov_type(val):
    """Convert a TensorFlow dtype into the corresponding OpenVINO ``Type``.

    :param val: a ``tf.dtypes.DType`` instance.
    :return: the matching ``openvino.runtime.Type``.
    :raises Exception: if ``val`` is not a TF dtype or has no OV counterpart.
    """
    import tensorflow as tf  # pylint: disable=import-error
    if not isinstance(val, tf.dtypes.DType):
        raise Exception("The provided type is not a TF type {}.".format(val))

    # Lookup table of supported TF dtype -> OV element type conversions.
    conversion_table = {
        tf.float32: Type.f32,
        tf.float16: Type.f16,
        tf.float64: Type.f64,
        tf.bfloat16: Type.bf16,
        tf.uint8: Type.u8,
        tf.int8: Type.i8,
        tf.int16: Type.i16,
        tf.int32: Type.i32,
        tf.int64: Type.i64,
        tf.bool: Type.boolean,
        tf.string: Type.string,
    }
    try:
        return conversion_table[val]
    except KeyError:
        raise Exception("The provided data type is not supported by OpenVino {}.".format(val))
@@ -0,0 +1,6 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # flake8: noqa
5
+
6
+ from openvino.helpers.packing import pack_data, unpack_data
@@ -0,0 +1,87 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # flake8: noqa
5
+
6
+ import numpy as np
7
+ from typing import Union
8
+ from openvino.runtime import Type, Shape
9
+
10
+
11
def pack_data(array: np.ndarray, type: Type) -> np.ndarray:
    """Represent array values as u1,u4 or i4 openvino element type and pack them into uint8 numpy array.

    If the number of elements in array is odd we pad them with zero value to be able to fit the bit
    sequence into the uint8 array.

    Example: two uint8 values - [7, 8] can be represented as uint4 values and be packed into one int8
    value - [120], because [7, 8] bit representation is [0111, 1000] will be viewed
    as [01111000], which is bit representation of [120].

    :param array: numpy array with values to pack.
    :type array: numpy array
    :param type: Type to interpret the array values. Type must be u1, u4, i4, nf4 or f4e2m1.
    :type type: openvino.runtime.Type
    :return: uint8 numpy array holding the packed bit sequence.
    :raises RuntimeError: if casting the values to the carrier dtype rounds them.
    """
    # Fixed garbled adjacent-string message ("for the" "data types" -> "for thedata types").
    assert type in [Type.u1, Type.u4, Type.i4, Type.nf4, Type.f4e2m1], \
        "Packing algorithm supports only data types stored in 1, 2 or 4 bits"

    # i4 needs a signed carrier so sign information survives the cast.
    minimum_regular_dtype = np.int8 if type == Type.i4 else np.uint8
    casted_to_regular_type = array.astype(dtype=minimum_regular_dtype, casting="unsafe")
    if not np.array_equal(casted_to_regular_type, array):
        raise RuntimeError(f'The conversion of array "{array}" to dtype' f' "{casted_to_regular_type}" results in rounding')

    data_size = casted_to_regular_type.size
    num_bits = type.bitwidth

    assert num_bits < 8 and 8 % num_bits == 0, \
        "Packing algorithm supports only data types stored in 1, 2 or 4 bits"
    num_values_fitting_into_uint8 = 8 // num_bits
    # Zero-pad so the total bit count is a multiple of 8.
    pad = (-data_size) % num_values_fitting_into_uint8

    flattened = casted_to_regular_type.flatten()
    padded = np.concatenate((flattened, np.zeros([pad], dtype=minimum_regular_dtype)))  # type: ignore
    assert padded.size % num_values_fitting_into_uint8 == 0

    # Expand every value into its num_bits bits (little-endian per value),
    # flip each row to big-endian bit order, then fold 8 bits per output byte.
    bit_order_little = (padded[:, None] & (1 << np.arange(num_bits)) > 0).astype(minimum_regular_dtype)
    bit_order_big = np.flip(bit_order_little, axis=1)  # type: ignore
    bit_order_big_flattened = bit_order_big.flatten()

    return np.packbits(bit_order_big_flattened)
49
+
50
+
51
def unpack_data(array: np.ndarray, type: Type, shape: Union[list, Shape]) -> np.ndarray:
    """Extract openvino element type values from array into new uint8/int8 array given shape.

    Example: uint8 value [120] can be represented as two u4 values and be unpacked into [7, 8]
    because [120] bit representation is [01111000] will be viewed as [0111, 1000],
    which is bit representation of [7, 8].

    :param array: numpy array to unpack.
    :type array: numpy array
    :param type: Type to extract from array values. Type must be u1, u4, i4, nf4 or f4e2m1.
    :type type: openvino.runtime.Type
    :param shape: the new shape for the unpacked array.
    :type shape: Union[list, openvino.runtime.Shape]
    :return: int8 array for i4, uint8 array otherwise, resized to ``shape``.
    """
    # Fixed garbled adjacent-string message ("for the" "data types" -> "for thedata types").
    assert type in [Type.u1, Type.u4, Type.i4, Type.nf4, Type.f4e2m1], \
        "Unpacking algorithm supports only data types stored in 1, 2 or 4 bits"
    unpacked = np.unpackbits(array.view(np.uint8))
    shape = list(shape)
    if type.bitwidth == 1:
        # One bit per value - nothing to regroup.
        return np.resize(unpacked, shape)
    else:
        # One row of `bitwidth` bits per packed value.
        unpacked = unpacked.reshape(-1, type.bitwidth)
        padding_shape = (unpacked.shape[0], 8 - type.bitwidth)
        if type == Type.i4:
            # Sign-extend each value: replicate its sign bit (the first,
            # most significant bit of the row) across the padding columns.
            # Vectorized replacement for the original per-row Python loop,
            # which also filled an uninitialized np.ndarray buffer.
            padding = np.repeat(unpacked[:, :1], padding_shape[1], axis=1)
        else:
            # Unsigned types are zero-extended.
            padding = np.zeros(padding_shape, np.uint8)
        padded = np.concatenate((padding, unpacked), 1)  # type: ignore
        packed = np.packbits(padded, 1)
        if type == Type.i4:
            # Reinterpret the sign-extended bytes as signed values.
            return np.resize(packed, shape).astype(dtype=np.int8)
        else:
            return np.resize(packed, shape)
@@ -0,0 +1,60 @@
1
+ # Torchvision to OpenVINO preprocessing converter
2
+
3
+ The Torchvision to OpenVINO preprocessing converter automatically translates an existing `torchvision.transforms` object into OpenVINO preprocessing. The translated preprocessing is then embedded into the model, resulting in better inference performance.
4
+
5
+
6
+ ## Supported transforms
7
+
8
+ Currently, the torchvision to OpenVINO preprocessing converter does not support all torchvision transforms.
9
+
10
+ Supported operations:
11
+ - `transforms.Compose`
12
+ - `transforms.Normalize`
13
+ - `transforms.ConvertImageDtype`
14
+ - `transforms.Grayscale`
15
+ - `transforms.Pad`
16
+ - `transforms.ToTensor`
17
+ - `transforms.CenterCrop`
18
+ - `transforms.Resize`
19
+
20
+ ## Example usage
21
+
22
+ ```python
23
+ preprocess_pipeline = torchvision.transforms.Compose(
24
+ [
25
+ torchvision.transforms.Resize(256, interpolation=transforms.InterpolationMode.NEAREST),
26
+ torchvision.transforms.CenterCrop((216, 218)),
27
+ torchvision.transforms.Pad((2, 3, 4, 5), fill=3),
28
+ torchvision.transforms.ToTensor(),
29
+ torchvision.transforms.ConvertImageDtype(torch.float32),
30
+ torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
31
+ ]
32
+ )
33
+
34
+ torch_model = SimpleConvnet(input_channels=3)
35
+
36
+ torch.onnx.export(torch_model, torch.randn(1, 3, 224, 224), "test_convnet.onnx", verbose=False, input_names=["input"], output_names=["output"])
37
+ core = Core()
38
+ ov_model = core.read_model(model="test_convnet.onnx")
39
+
40
+ test_input = np.random.randint(255, size=(260, 260, 3), dtype=np.uint16)
41
+ ov_model = PreprocessConverter.from_torchvision(
42
+ model=ov_model, transform=preprocess_pipeline, input_example=Image.fromarray(test_input.astype("uint8"), "RGB")
43
+ )
44
+ ov_model = core.compile_model(ov_model, "CPU")
45
+ ov_input = np.expand_dims(test_input, axis=0)
46
+ output = ov_model.output(0)
47
+ ov_result = ov_model(ov_input)[output]
48
+ ```
49
+
50
+ ## Key contacts
51
+
52
+ If you have any questions, feature requests or want us to review your PRs, send us a message or ping us on GitHub via [openvino-ie-python-api-maintainers](https://github.com/orgs/openvinotoolkit/teams/openvino-ie-python-api-maintainers). You can always directly contact everyone from this group.
53
+
54
+ ## See also
55
+
56
+ * [OpenVINO™ README](../../../README.md)
57
+ * [OpenVINO™ Core Components](../../README.md)
58
+ * [OpenVINO™ Python API Reference](https://docs.openvino.ai/2024/api/ie_python_api/api.html)
59
+ * [OpenVINO™ Python API Advanced Inference](https://docs.openvino.ai/2024/openvino-workflow/running-inference/integrate-openvino-with-your-application/python-api-advanced-inference.html)
60
+ * [OpenVINO™ Python API Exclusives](https://docs.openvino.ai/2024/openvino-workflow/running-inference/integrate-openvino-with-your-application/python-api-exclusives.html)
@@ -0,0 +1,28 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ """
5
+ Package: openvino
6
+ Low level wrappers for the PrePostProcessing C++ API.
7
+ """
8
+
9
+ # flake8: noqa
10
+
11
+ from openvino._pyopenvino import get_version
12
+
13
+ __version__ = get_version()
14
+
15
+ # main classes
16
+ from openvino._pyopenvino.preprocess import InputInfo
17
+ from openvino._pyopenvino.preprocess import OutputInfo
18
+ from openvino._pyopenvino.preprocess import InputTensorInfo
19
+ from openvino._pyopenvino.preprocess import OutputTensorInfo
20
+ from openvino._pyopenvino.preprocess import InputModelInfo
21
+ from openvino._pyopenvino.preprocess import OutputModelInfo
22
+ from openvino._pyopenvino.preprocess import PrePostProcessor
23
+ from openvino._pyopenvino.preprocess import PreProcessSteps
24
+ from openvino._pyopenvino.preprocess import PostProcessSteps
25
+ from openvino._pyopenvino.preprocess import ColorFormat
26
+ from openvino._pyopenvino.preprocess import ResizeAlgorithm
27
+ from openvino._pyopenvino.preprocess import PaddingMode
28
+
@@ -0,0 +1,15 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ """
5
+ Package: openvino
6
+ Torchvision to OpenVINO preprocess converter.
7
+ """
8
+
9
+ # flake8: noqa
10
+
11
+ from openvino._pyopenvino import get_version as _get_version
12
+
13
+ __version__ = _get_version()
14
+
15
+ from .preprocess_converter import PreprocessConverter