bigdl-core-npu 2.5.0__cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (223) hide show
  1. bigdl_core_npu-2.5.0.dist-info/METADATA +35 -0
  2. bigdl_core_npu-2.5.0.dist-info/RECORD +223 -0
  3. bigdl_core_npu-2.5.0.dist-info/WHEEL +5 -0
  4. bigdl_core_npu-2.5.0.dist-info/top_level.txt +1 -0
  5. intel_npu_acceleration_library/__init__.py +24 -0
  6. intel_npu_acceleration_library/_version.py +6 -0
  7. intel_npu_acceleration_library/backend/__init__.py +37 -0
  8. intel_npu_acceleration_library/backend/base.py +215 -0
  9. intel_npu_acceleration_library/backend/bindings.py +279 -0
  10. intel_npu_acceleration_library/backend/compression.py +24 -0
  11. intel_npu_acceleration_library/backend/convolution.py +58 -0
  12. intel_npu_acceleration_library/backend/factory.py +944 -0
  13. intel_npu_acceleration_library/backend/linear.py +60 -0
  14. intel_npu_acceleration_library/backend/matmul.py +59 -0
  15. intel_npu_acceleration_library/backend/mlp.py +58 -0
  16. intel_npu_acceleration_library/backend/ops.py +141 -0
  17. intel_npu_acceleration_library/backend/qlinear.py +71 -0
  18. intel_npu_acceleration_library/backend/qmatmul.py +66 -0
  19. intel_npu_acceleration_library/backend/runtime.py +210 -0
  20. intel_npu_acceleration_library/backend/sdpa.py +107 -0
  21. intel_npu_acceleration_library/backend/tensor.py +1050 -0
  22. intel_npu_acceleration_library/backend/utils.py +70 -0
  23. intel_npu_acceleration_library/compiler.py +194 -0
  24. intel_npu_acceleration_library/device.py +230 -0
  25. intel_npu_acceleration_library/dtypes.py +122 -0
  26. intel_npu_acceleration_library/external/openvino/__init__.py +71 -0
  27. intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +20 -0
  28. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
  29. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
  30. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
  31. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
  32. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
  33. intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
  34. intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
  35. intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
  36. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
  37. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
  38. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
  39. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
  40. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
  41. intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
  42. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
  43. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
  44. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
  45. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
  46. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
  47. intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
  48. intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +352 -0
  49. intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +139 -0
  50. intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
  51. intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +98 -0
  52. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
  53. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
  54. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
  55. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
  56. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
  57. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +119 -0
  58. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
  59. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
  60. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
  61. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
  62. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +289 -0
  63. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +118 -0
  64. intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +536 -0
  65. intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +256 -0
  66. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
  67. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
  68. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
  69. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
  70. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
  71. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
  72. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
  73. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
  74. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +460 -0
  75. intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
  76. intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
  77. intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
  78. intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +26 -0
  79. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
  80. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
  81. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +4 -0
  82. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
  83. intel_npu_acceleration_library/external/openvino/properties/__init__.py +21 -0
  84. intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
  85. intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
  86. intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
  87. intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
  88. intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
  89. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
  90. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
  91. intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
  92. intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
  93. intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
  94. intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
  95. intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
  96. intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +18 -0
  97. intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
  98. intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
  99. intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3067 -0
  100. intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
  101. intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
  102. intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
  103. intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
  104. intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
  105. intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
  106. intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
  107. intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +399 -0
  108. intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
  109. intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
  110. intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +10 -0
  111. intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +85 -0
  112. intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
  113. intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
  114. intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
  115. intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
  116. intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
  117. intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
  118. intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
  119. intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
  120. intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
  121. intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +189 -0
  122. intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
  123. intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
  124. intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
  125. intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +783 -0
  126. intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
  127. intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
  128. intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
  129. intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
  130. intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
  131. intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
  132. intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +38 -0
  133. intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
  134. intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
  135. intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
  136. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
  137. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +429 -0
  138. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
  139. intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +70 -0
  140. intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
  141. intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
  142. intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
  143. intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
  144. intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
  145. intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
  146. intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
  147. intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
  148. intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
  149. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
  150. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
  151. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
  152. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
  153. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
  154. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
  155. intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
  156. intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
  157. intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
  158. intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
  159. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
  160. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +536 -0
  161. intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
  162. intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
  163. intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
  164. intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
  165. intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
  166. intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +35 -0
  167. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
  168. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
  169. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
  170. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
  171. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
  172. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
  173. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
  174. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
  175. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +246 -0
  176. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
  177. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +205 -0
  178. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
  179. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
  180. intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
  181. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
  182. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
  183. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
  184. intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +109 -0
  185. intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
  186. intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
  187. intel_npu_acceleration_library/external/openvino/utils.py +98 -0
  188. intel_npu_acceleration_library/functional/__init__.py +8 -0
  189. intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
  190. intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
  191. intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
  192. intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
  193. intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
  194. intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
  195. intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
  196. intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
  197. intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
  198. intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
  199. intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
  200. intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
  201. intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
  202. intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
  203. intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
  204. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
  205. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
  206. intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
  207. intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
  208. intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
  209. intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
  210. intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
  211. intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
  212. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
  213. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
  214. intel_npu_acceleration_library/modelling.py +150 -0
  215. intel_npu_acceleration_library/nn/__init__.py +20 -0
  216. intel_npu_acceleration_library/nn/autograd.py +68 -0
  217. intel_npu_acceleration_library/nn/conv.py +257 -0
  218. intel_npu_acceleration_library/nn/functional.py +1207 -0
  219. intel_npu_acceleration_library/nn/linear.py +162 -0
  220. intel_npu_acceleration_library/nn/llm.py +417 -0
  221. intel_npu_acceleration_library/nn/module.py +393 -0
  222. intel_npu_acceleration_library/optimizations.py +157 -0
  223. intel_npu_acceleration_library/quantization.py +174 -0
@@ -0,0 +1,83 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ import os
5
+ import sys
6
+ import tempfile
7
+
8
+
9
class paddle_frontend_converter:
    """Serialize a Paddle model into the ``*.pdmodel`` inference format.

    The generated files (``*.pdmodel``, ``*.pdiparams``, ``*.pdiparams.info``)
    are written next to a reserved temporary-file name and can be removed
    again with :meth:`destroy`.
    """

    def __init__(self, model, inputs=None, outputs=None):
        # model: a paddle model object; see convert_paddle_to_pdmodel for the
        # three supported categories.
        # inputs: input spec, required for dygraph/static models.
        # outputs: output list, required only for static (Executor) models.
        self.model = model
        self.inputs = inputs
        self.outputs = outputs
        # Artifact bookkeeping: everything stays None until
        # convert_paddle_to_pdmodel() has been called.
        self.tmp = None
        self.model_name = None
        self.pdmodel = None
        self.pdiparams = None
        self.pdiparams_info = None
        self.is_generated = False

    def destroy(self):
        """Close the temp file and delete serialized artifacts from disk."""
        # close tmp file
        if isinstance(self.tmp, tempfile._TemporaryFileWrapper):
            self.tmp.close()

        # Remove the *.pdmodel, *.pdiparams and *.pdiparams.info files.
        # The paths are None before convert_paddle_to_pdmodel() ran, and
        # os.path.exists(None) raises TypeError, so guard each path to make
        # destroy() safe to call at any time.
        for artifact in (self.pdmodel, self.pdiparams, self.pdiparams_info):
            if artifact is not None and os.path.exists(artifact):
                os.remove(artifact)

    def convert_paddle_to_pdmodel(self):
        '''
        There are three paddle model categories:
        - High Level API: is a wrapper for dynamic or static model, use `self.save` to serialize
        - Dynamic Model: use `paddle.jit.save` to serialize
        - Static Model: use `paddle.static.save_inference_model` to serialize
        '''
        try:
            # The temp file only reserves a unique base name; paddle writes
            # the real artifacts next to it with its own suffixes.
            self.tmp = tempfile.NamedTemporaryFile(delete=True)
            self.model_name = self.tmp.name
            self.pdmodel = "{}.pdmodel".format(self.model_name)
            self.pdiparams = "{}.pdiparams".format(self.model_name)
            self.pdiparams_info = "{}.pdiparams.info".format(self.model_name)

            import paddle  # pylint: disable=import-error
            if isinstance(self.model, paddle.hapi.model.Model):
                self.model.save(self.model_name, False)
            else:
                if self.inputs is None:
                    raise RuntimeError(
                        "Saving inference model needs 'inputs' before saving. Please specify 'example_input'"
                    )
                if isinstance(self.model, paddle.fluid.dygraph.layers.Layer):
                    # _dygraph_guard(None) switches to static-graph mode for
                    # the duration of the save.
                    with paddle.fluid.framework._dygraph_guard(None):
                        paddle.jit.save(self.model, self.model_name, input_spec=self.inputs, output_spec=self.outputs)
                elif isinstance(self.model, paddle.fluid.executor.Executor):
                    if self.outputs is None:
                        raise RuntimeError(
                            "Model is static. Saving inference model needs 'outputs' before saving. Please specify 'output' for this model"
                        )
                    paddle.static.save_inference_model(self.model_name, self.inputs, self.outputs, self.model)
                else:
                    raise RuntimeError(
                        "Conversion just support paddle.hapi.model.Model, paddle.fluid.dygraph.layers.Layer and paddle.fluid.executor.Executor"
                    )

            if not os.path.exists(self.pdmodel):
                print("Failed generating paddle inference format model")
                sys.exit(1)

            self.is_generated = True
            return self.pdmodel
        finally:
            # close tmp file
            if isinstance(self.tmp, tempfile._TemporaryFileWrapper):
                self.tmp.close()
@@ -0,0 +1,246 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ import argparse
5
+ import logging as log
6
+ import sys
7
+ from typing import List
8
+
9
+ import numpy as np
10
+ import os
11
+
12
+ from openvino.frontend import FrontEnd, InputModel, NotImplementedFailure, \
13
+ Place # pylint: disable=no-name-in-module,import-error
14
+ from openvino.runtime import PartialShape, Type # pylint: disable=no-name-in-module,import-error
15
+ from openvino.runtime.utils.types import get_element_type, \
16
+ get_numpy_ctype # pylint: disable=no-name-in-module,import-error
17
+ from openvino.tools.ovc.moc_frontend.analysis import json_model_analysis_dump
18
+ from openvino.tools.ovc.moc_frontend.extractor import fe_user_data_repack, convert_params_lists_to_dicts, \
19
+ fe_output_user_data_repack
20
+ from openvino.tools.ovc.error import Error
21
+ from openvino.tools.ovc.utils import np_map_cast, mo_array
22
+
23
+
24
def get_enabled_and_disabled_transforms():
    """
    :return: tuple of lists with force enabled and disabled id of transformations.
    """
    # Missing environment variables behave exactly like empty strings.
    enabled_env = os.environ.get('MO_ENABLED_TRANSFORMS', '')
    disabled_env = os.environ.get('MO_DISABLED_TRANSFORMS', '')

    assert isinstance(enabled_env, str)
    assert isinstance(disabled_env, str)

    # Note: splitting an empty string yields [''], matching historic behavior.
    return enabled_env.split(','), disabled_env.split(',')
38
+
39
+
40
def raise_exception_for_input_output_cut(model_inputs_or_outputs: List[Place], new_nodes: List[dict], is_input: bool):
    # Reject any user-requested place that is not already one of the model's
    # existing inputs (or outputs, when is_input is False).
    for candidate in new_nodes:
        place = candidate['node']
        if any(existing.is_equal(place) for existing in model_inputs_or_outputs):
            continue
        if is_input:
            raise Exception("Name {} is not found among model inputs.".format(candidate['input_name']))
        raise Exception("Name {} is not found among model outputs.".format(candidate['output_name']))
49
+
50
+
51
def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
    """
    Load input model and convert it to nGraph function
    :param: argv: parsed command line arguments
    :param: moc_front_end: Loaded Frontend for converting input model
    :return: converted nGraph function ready for serialization
    """

    share_weights = getattr(argv, 'share_weights', True)  # FIXME: Should be controlled by default value
    if isinstance(argv.input_model, (tuple, list)) and len(argv.input_model) == 2:
        # frozen format with v1 checkpoints
        input_model = moc_front_end.load([part for part in argv.input_model], share_weights)
    else:
        input_model = moc_front_end.load(argv.input_model, share_weights)

    # NOTE(review): the triple-quoted string below is intentionally dead code
    # kept as a reminder for an uncovered meta-graph case; it is a no-op
    # expression statement, not executed logic.
    '''elif argv.input_meta_graph: # TODO: Cover this case
    input_model = moc_front_end.load(argv.input_meta_graph, share_weights)
    if argv.output:
        # Simulate original behavior with freezing model
        # While freezing we do a cutting of model, to keep similar behavior we
        # need to simulate similar behavior with natively supported model
        outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name())
        input_model.override_all_outputs([x['node'] for x in outputs])
    '''
    # Normalize user-provided shapes/types (possibly lists) into dicts keyed
    # by input names of the loaded model.
    argv.placeholder_shapes, argv.placeholder_data_types = convert_params_lists_to_dicts(
        input_model, argv.placeholder_shapes, argv.placeholder_data_types)

    # Resolve command-line names to actual Place objects of the input model.
    user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
        input_model, argv.placeholder_shapes, argv.placeholder_data_types,
        argv.output, {}, moc_front_end.get_name())

    def check_places_are_same(places_original: List[Place], places_new: List[Place]):
        """
        Check if set of new places is same as original or not.
        :param places_original: List[Place] Original model places
        :param places_new: List[Place] New list of places
        :return: True if new list of places is same as original
        """
        # Set-equality by Place.is_equal: same length and every original
        # place matched by some new place.
        return len(places_original) == len(places_new) and len(
            [item for item in places_original if any(
                [item.is_equal(item2['node']) for item2 in places_new])]) == len(places_original)

    def add_names_to_tensors(model: InputModel, places: List[Place]):
        """
        Adds additional names to some model input tensors. This helper should be used
        when a model modification is going to happen.
        :param model The input model loaded by a given frontend
        :param places An object containing Places and names that will be used for model modification
        """
        for new_input in places:
            if 'input_name' not in new_input:
                continue
            try:
                model.add_name_for_tensor(new_input['node'], new_input['input_name'])
            except NotImplementedFailure as e:
                # some frontends might not implement this method
                log.warning('Could not add an additional name to a tensor pointed to by \'{}\'. Details: {}'.format(
                    new_input['input_name'], str(e)))

    enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms()
    if 'ANALYSIS_JSON_PRINT' in enabled_transforms:
        # NOTE that model analysis is performed before applying user's settings (inputs's shapes etc.)
        framework_model = moc_front_end.decode(input_model)
        json_model_analysis_dump(framework_model)
        # a model is not processed further in json analysis mode
        sys.exit(0)

    model_inputs = input_model.get_inputs()
    inputs_equal = True
    if user_shapes:
        # TODO: Remove this line when new 'cut' helper is introduced
        raise_exception_for_input_output_cut(model_inputs, user_shapes, True)

        inputs_equal = check_places_are_same(model_inputs, user_shapes)

    outputs_equal = True
    if outputs:
        # TODO: Remove this line when new 'cut' helper is introduced
        raise_exception_for_input_output_cut(input_model.get_outputs(), outputs, False)

        outputs_equal = check_places_are_same(input_model.get_outputs(), outputs)
    log.debug('Inputs are same: {}, outputs are same: {}'.format(
        inputs_equal, outputs_equal))

    def create_target_input_shapes(new_input_places):
        # If the caller already passed (name, shape)-style tuples, keep them;
        # otherwise re-key the previously collected shapes by the first tensor
        # name of each new input place.
        if isinstance(new_input_places, list) and len(new_input_places) > 1 \
                and isinstance(new_input_places[0], tuple):
            return new_input_places
        new_input_place_names = [x.get_names()[0] for x in new_input_places]
        shapes = [shape for shape in argv.placeholder_shapes.values()]
        # NOTE(review): zip relies on argv.placeholder_shapes preserving the
        # same ordering as new_input_places — presumably guaranteed by the
        # earlier repacking; confirm against fe_user_data_repack.
        return dict(zip(new_input_place_names, shapes))

    # Cut the model according to whether inputs and/or outputs changed.
    if not inputs_equal and not outputs_equal:
        log.debug('Using extract subgraph')
        new_input_places = [x['node'] for x in user_shapes]
        new_output_places = [x['node'] for x in outputs]
        add_names_to_tensors(input_model, user_shapes)
        input_model.extract_subgraph(new_input_places, new_output_places)
        # invalidation of existing Place objects could have happened in the operation above
        if user_shapes:
            placeholder_shapes = create_target_input_shapes(new_input_places)
            new_output_places_name = [x.get_names()[0] for x in new_output_places]

            # Re-resolve places against the cut model.
            user_shapes, outputs, _ = fe_user_data_repack(
                input_model, placeholder_shapes, argv.placeholder_data_types,
                new_output_places_name, {}, moc_front_end.get_name())
    elif not inputs_equal:
        log.debug('Using override_all_inputs')
        add_names_to_tensors(input_model, user_shapes)
        new_input_places = [x['node'] for x in user_shapes]
        input_model.override_all_inputs(new_input_places)
        # invalidation of existing Place objects could have happened in the operation above
        if user_shapes:
            placeholder_shapes = create_target_input_shapes(new_input_places)

            user_shapes, outputs, _ = fe_user_data_repack(
                input_model, placeholder_shapes, argv.placeholder_data_types,
                argv.output, {}, moc_front_end.get_name())
    elif not outputs_equal:
        log.debug('Using override_all_outputs')
        add_names_to_tensors(input_model, user_shapes)
        new_output_places = [x['node'] for x in outputs]
        input_model.override_all_outputs(new_output_places)
        # invalidation of existing Place objects could have happened in the operation above
        if user_shapes:
            model_inputs = input_model.get_inputs()

    # Apply user-requested shapes and element types to the (possibly cut) model.
    if user_shapes:
        for user_shape in user_shapes:
            if user_shape.get('shape') is not None:
                input_model.set_partial_shape(
                    user_shape['node'], user_shape['shape'])
            if user_shape.get('data_type') is not None:
                data_type = user_shape['data_type']
                log.debug('Set data type: {}'.format(data_type))
                input_model.set_element_type(user_shape['node'], data_type)

    # Freeze placeholders: replace selected inputs with constant values.
    if freeze_placeholder:
        for name, value in freeze_placeholder.items():
            node = None
            # look for the certain place in user_shapes
            for node_cur in user_shapes:
                if node_cur.get('input_name') == name:
                    node = node_cur
                    break
            if node is None:
                raise Error("Please check correctness of the command-line. "
                            "Place (operation or tensor) with name {} is not found.".format(name))
            place = node.get('node')

            if node.get('data_type'):
                dtype = node['data_type']
                ov_type = Type(dtype)
            else:
                # we need to detect type of Placeholder
                try:
                    ov_type = input_model.get_element_type(place)
                except NotImplementedFailure:
                    raise Error("Please specify type for value freezing {} node explicitly "
                                "because the frontend does not support automatic type detection.".format(name))
                # in case of cutting graph (or using custom inputs) and unspecified or dynamic type,
                # the default type is fp32
                if ov_type == Type.undefined or ov_type == Type.dynamic:
                    ov_type = Type.f32
                dtype = get_numpy_ctype(ov_type)

            input_model.set_element_type(place, ov_type)
            # prepare and cast value to dtype
            if isinstance(value, list):
                casted_list = list()
                for v in mo_array(value):
                    casted_list.append(np_map_cast[dtype](v))
                value = mo_array(casted_list, dtype=dtype)
            else:
                value = np_map_cast[dtype](value)
            value = np.array(value, dtype=dtype)

            ov_shape = input_model.get_partial_shape(place)
            if node.get('shape'):
                # set user defined shape
                ov_shape = PartialShape(node['shape'])
                input_model.set_partial_shape(place, ov_shape)
            elif ov_shape.is_dynamic:
                # in case of dynamic shape (dynamic rank or dynamic dimension)
                # deduce it based on the value shape and set it
                ov_shape = PartialShape(value.shape)
                input_model.set_partial_shape(place, ov_shape)

            input_model.set_tensor_value(place, value)

    # NOTE(review): shape_to_array is defined but never called within this
    # function — likely leftover or kept for external debugging use.
    def shape_to_array(shape: PartialShape):
        return [shape.get_dimension(i) for i in range(shape.rank.get_length())]

    ov_model = moc_front_end.convert(input_model)

    return ov_model
@@ -0,0 +1,220 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ import argparse
5
+ import logging as log
6
+
7
+ from openvino.preprocess import PrePostProcessor # pylint: disable=no-name-in-module,import-error
8
+ # pylint: disable=no-name-in-module,import-error
9
+ from openvino.runtime import Model, Layout, PartialShape
10
+ from openvino.tools.ovc.error import Error
11
+ from openvino.tools.ovc.moc_frontend.layout_utils import update_layout_to_dict
12
+ from openvino.tools.ovc.utils import refer_to_faq_msg
13
+
14
+
15
def check_keys_valid(ov_function: Model, dict_to_validate: dict, search_outputs: bool):
    """
    Internal function: verifies that every key of ``dict_to_validate`` names an
    input (and, when ``search_outputs`` is True, possibly an output) of ``ov_function``.

    Keys given as node friendly names are rewritten in place to one of the node's
    tensor names. Raises Error when a key matches nothing, or when two distinct
    keys resolve to the same model input/output.
    """
    nodes = ov_function.inputs
    if search_outputs:
        nodes += ov_function.outputs

    # Pass 1: keys that are node friendly names (not tensor names) must be
    # rewritten to a tensor name so the rest of the pipeline sees tensor names.
    rename_dict = {}
    for key in dict_to_validate.keys():
        for ov_node in nodes:
            if key in ov_node.get_tensor().get_names():
                break
            if key == ov_node.get_node().get_friendly_name():
                tensor_names = ov_node.get_tensor().get_names()
                assert len(tensor_names) > 0, 'Node must have at least one tensor name'
                rename_dict[key] = list(tensor_names)[0]
                break

    # Pass 2: apply the collected renames to the dict in place.
    for old_key, new_key in rename_dict.items():
        assert old_key in dict_to_validate, 'Key {} is not in initial dict'.format(old_key)
        assert new_key not in dict_to_validate, 'Key {} is already in initial dict'.format(new_key)
        dict_to_validate[new_key] = dict_to_validate[old_key]
        del dict_to_validate[old_key]

    # Pass 3: every (possibly renamed) key must match exactly one node, and no
    # two keys may land on the same node.
    nodes_used = {}
    for key in dict_to_validate.keys():
        matched_node = None
        for ov_node in nodes:
            if key in ov_node.get_tensor().get_names():
                matched_node = ov_node
                break
        if matched_node is not None:
            if matched_node in nodes_used:
                raise Error('Key for {} and {} point to same model input/output.'
                            .format(key, nodes_used[matched_node]))
            nodes_used[matched_node] = key
        else:
            if not search_outputs:
                raise Error('Input with name {} wasn\'t found! {}'.format(key, refer_to_faq_msg(83)))
            else:
                raise Error('Input/Output with name {} wasn\'t found! {}'.format(key, refer_to_faq_msg(83)))
63
+
64
+
65
def update_layout_is_input_flag(ov_function: Model, layout_values: dict):
    """
    Internal function: stamps each layout entry with an ``is_input`` flag that is
    True when the entry's key names one of the model's input tensors, else False.
    Returns the same (mutated) ``layout_values`` dict.
    """
    for layout_name, layout_entry in layout_values.items():
        layout_entry['is_input'] = any(
            layout_name in model_input.get_tensor().get_names()
            for model_input in ov_function.inputs
        )
    return layout_values
76
+
77
+
78
def find_channels_dimension(shape: PartialShape, num_channels: int, name: str, layout_values):
    """
    Internal function. Finds the dimension index whose static size equals the
    expected channels number and records a guessed layout for the parameter.
    Raises Error if there are no candidates, more than one candidate, a dynamic
    rank, or the candidate sits at a position impossible for a 3D/4D/5D layout.
    :param: shape Parameter's partial shape
    :param: num_channels Number of channels to find in shape
    :param: name Parameter's name, used for Error-handling purposes
    :param: layout_values Existing source/target layout items specified by user
    :return: updated layout items with guessed layouts
    """
    if shape.rank.is_dynamic:
        raise Error('Can\'t determine channels dimension for dynamic shape for parameter {}.'
                    .format(name))

    rank = shape.rank.get_length()
    # Collect every static dimension matching the requested channel count.
    candidates = [idx for idx in range(rank)
                  if shape.get_dimension(idx).is_static
                  and shape.get_dimension(idx).get_length() == num_channels]

    if len(candidates) > 1:
        raise Error('Can\'t determine channels dimension for {}. '
                    'Input shape is {}, needed channels {}. '
                    'Conflicting dimensions: {} and {}. Please specify layout manually.'
                    .format(name, shape, num_channels, candidates[0], candidates[1]))
    if not candidates:
        raise Error('Can\'t determine channels dimension for {}. '
                    'Input shape is {}, needed channels {}'
                    .format(name, shape, num_channels))
    channels_idx = candidates[0]

    # Restrict the guessed channels index to positions valid for the tensor rank:
    # 3D: CHW/HWC, 4D: NCHW/NHWC, 5D: NCDHW/NDHWC.
    allowed_positions = {
        3: ((0, 2), 'CHW or HWC'),
        4: ((1, 3), 'NCHW or NHWC'),
        5: ((1, 4), 'NCDHW or NDHWC'),
    }
    if rank not in allowed_positions:
        raise Error('Can\'t determine channels dimension for {}D input {} with shape {}.'
                    'Please specify layout containing \'C\' channels manually.'
                    .format(rank, name, shape))
    positions, layouts_hint = allowed_positions[rank]
    if channels_idx not in positions:
        raise Error('Can\'t determine channels dimension for {}D input {} ({}) with shape {}. '
                    'Please specify layout containing \'C\' channels manually.'
                    .format(rank, name, layouts_hint, shape))

    # Build a layout like '??C?' with 'C' at the guessed channel position.
    guessed = ['?'] * rank
    guessed[channels_idx] = 'C'
    layout_values[name] = {
        'source_layout': ''.join(guessed),
        'target_layout': None,
        'source_guessed': True,
        'is_input': True
    }
    return layout_values
137
+
138
+
139
def update_tensor_names_to_first_in_sorted_list(values_dict: dict, ov_function: Model):
    """
    Internal function: rewrites each key of ``values_dict`` (a tensor name or a
    node friendly name of a model input) to the alphabetically first tensor name
    of the matched input. Raises Error when a key matches no input or when two
    keys resolve to the same input. Non-dict arguments are returned unchanged.
    """
    if not isinstance(values_dict, dict):
        return values_dict

    updated_dict = {}
    used_nodes = {}
    for key, value in values_dict.items():
        matched_input = None
        first_name = None
        for model_input in ov_function.inputs:
            sorted_names = sorted(model_input.names)
            if key in sorted_names or key == model_input.node.get_friendly_name():
                matched_input = model_input
                first_name = sorted_names[0]
                break
        if matched_input is None:
            raise Error('Input with name {} wasn\'t found! {}'.format(key, refer_to_faq_msg(83)))
        if matched_input in used_nodes:
            raise Error("Tensor names {} and {} refer to the same node.".format(key, used_nodes[matched_input]))
        used_nodes.update({matched_input: key})
        updated_dict[first_name] = value

    return updated_dict
161
+
162
+
163
def apply_preprocessing(ov_function: Model, argv: argparse.Namespace):
    """
    Applies pre-processing of model inputs by adding appropriate operations
    On return, 'ov_function' object will be updated
    Expected 'argv.mean_scale_values' formats examples:
    a) Dict: {'inputName': {'mean': [1., 2., 3.], 'scale': [2., 4., 8.]}}
    b) List: list(np.array([(np.array([1., 2., 3.]), np.array([2., 4., 6.])),
    (np.array([7., 8., 9.]), np.array([5., 6., 7.])))
    Expected 'argv.layout_values' format examples:
    a) Specific layouts for inputs and outputs
    { 'input1': {
    'source_layout': 'nchw',
    'target_layout': 'nhwc'
    },
    'output2': {
    'source_layout': 'nhwc'
    }
    }
    b) Layout for single input: {'': {'source_layout': 'nchw'}}
    :param: ov_function OV function for applying mean/scale pre-processing
    :param: argv Parsed command line arguments
    """
    # All layout changes are staged on this builder and applied in one build() call.
    prep = PrePostProcessor(ov_function)

    layout_values = {}
    if 'layout_values' in argv and argv.layout_values:
        # Normalize the user-provided layouts into {tensor_name: layout-entry dict}.
        layout_values = update_layout_to_dict(ov_function.inputs, argv.layout_values,
                                              lambda ov_input: ov_input.get_tensor().get_names())

    # NOTE: mutates layout_values in place — friendly-name keys are replaced by
    # tensor names, and unknown names raise Error here.
    check_keys_valid(ov_function=ov_function, dict_to_validate=layout_values, search_outputs=True)

    # Mark each entry as belonging to an input or an output.
    layout_values = update_layout_is_input_flag(ov_function, layout_values)

    for node_name, layout_value in layout_values.items():
        # 'source_layout' describes the layout the model currently has;
        # 'target_layout' describes the layout the user wants the tensor to have.
        if layout_value.get('source_layout'):
            if layout_value.get('is_input'):
                prep.input(node_name).model().set_layout(Layout(layout_value['source_layout']))
            else:
                prep.output(node_name).model().set_layout(Layout(layout_value['source_layout']))
        if layout_value.get('target_layout'):
            if layout_value.get('is_input'):
                prep.input(node_name).tensor().set_layout(Layout(layout_value['target_layout']))
            else:
                prep.output(node_name).tensor().set_layout(Layout(layout_value['target_layout']))

    # Apply pre-processing builder to a function
    ov_function = prep.build()

    # Remove guessed layout values from ov_function (these values shall not be serialized to IR)
    for node_name, layout_value in layout_values.items():
        if layout_value.get('source_guessed') and \
                not layout_value.get('target_layout'):
            # search for parameter object
            for idx, ov_input in enumerate(ov_function.inputs):
                if node_name in ov_input.get_tensor().get_names():
                    log.debug('Clearing guessed layout {} for {}'
                              .format(layout_value['source_layout'], node_name))
                # NOTE(review): this reset is at loop-body level, so it uses the
                # last matched idx — presumably intended to run only for the
                # matched input; confirm against upstream behavior.
                ov_function.get_parameters()[idx].layout = Layout()