bigdl-core-npu 2.5.0__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (223) hide show
  1. bigdl_core_npu-2.5.0.dist-info/METADATA +35 -0
  2. bigdl_core_npu-2.5.0.dist-info/RECORD +223 -0
  3. bigdl_core_npu-2.5.0.dist-info/WHEEL +5 -0
  4. bigdl_core_npu-2.5.0.dist-info/top_level.txt +1 -0
  5. intel_npu_acceleration_library/__init__.py +24 -0
  6. intel_npu_acceleration_library/_version.py +6 -0
  7. intel_npu_acceleration_library/backend/__init__.py +37 -0
  8. intel_npu_acceleration_library/backend/base.py +215 -0
  9. intel_npu_acceleration_library/backend/bindings.py +279 -0
  10. intel_npu_acceleration_library/backend/compression.py +24 -0
  11. intel_npu_acceleration_library/backend/convolution.py +58 -0
  12. intel_npu_acceleration_library/backend/factory.py +944 -0
  13. intel_npu_acceleration_library/backend/linear.py +60 -0
  14. intel_npu_acceleration_library/backend/matmul.py +59 -0
  15. intel_npu_acceleration_library/backend/mlp.py +58 -0
  16. intel_npu_acceleration_library/backend/ops.py +141 -0
  17. intel_npu_acceleration_library/backend/qlinear.py +71 -0
  18. intel_npu_acceleration_library/backend/qmatmul.py +66 -0
  19. intel_npu_acceleration_library/backend/runtime.py +210 -0
  20. intel_npu_acceleration_library/backend/sdpa.py +107 -0
  21. intel_npu_acceleration_library/backend/tensor.py +1050 -0
  22. intel_npu_acceleration_library/backend/utils.py +70 -0
  23. intel_npu_acceleration_library/compiler.py +194 -0
  24. intel_npu_acceleration_library/device.py +230 -0
  25. intel_npu_acceleration_library/dtypes.py +122 -0
  26. intel_npu_acceleration_library/external/openvino/__init__.py +71 -0
  27. intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +20 -0
  28. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
  29. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
  30. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
  31. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
  32. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
  33. intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
  34. intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
  35. intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
  36. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
  37. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
  38. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
  39. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
  40. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
  41. intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
  42. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
  43. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
  44. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
  45. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
  46. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
  47. intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
  48. intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +352 -0
  49. intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +139 -0
  50. intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
  51. intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +98 -0
  52. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
  53. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
  54. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
  55. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
  56. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
  57. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +119 -0
  58. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
  59. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
  60. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
  61. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
  62. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +289 -0
  63. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +118 -0
  64. intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +536 -0
  65. intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +256 -0
  66. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
  67. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
  68. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
  69. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
  70. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
  71. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
  72. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
  73. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
  74. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +460 -0
  75. intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
  76. intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
  77. intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
  78. intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +26 -0
  79. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
  80. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
  81. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +4 -0
  82. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
  83. intel_npu_acceleration_library/external/openvino/properties/__init__.py +21 -0
  84. intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
  85. intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
  86. intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
  87. intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
  88. intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
  89. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
  90. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
  91. intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
  92. intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
  93. intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
  94. intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
  95. intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
  96. intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +18 -0
  97. intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
  98. intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
  99. intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3067 -0
  100. intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
  101. intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
  102. intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
  103. intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
  104. intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
  105. intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
  106. intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
  107. intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +399 -0
  108. intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
  109. intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
  110. intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +10 -0
  111. intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +85 -0
  112. intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
  113. intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
  114. intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
  115. intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
  116. intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
  117. intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
  118. intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
  119. intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
  120. intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
  121. intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +189 -0
  122. intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
  123. intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
  124. intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
  125. intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +783 -0
  126. intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
  127. intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
  128. intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
  129. intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
  130. intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
  131. intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
  132. intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +38 -0
  133. intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
  134. intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
  135. intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
  136. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
  137. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +429 -0
  138. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
  139. intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +70 -0
  140. intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
  141. intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
  142. intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
  143. intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
  144. intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
  145. intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
  146. intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
  147. intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
  148. intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
  149. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
  150. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
  151. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
  152. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
  153. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
  154. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
  155. intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
  156. intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
  157. intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
  158. intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
  159. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
  160. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +536 -0
  161. intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
  162. intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
  163. intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
  164. intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
  165. intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
  166. intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +35 -0
  167. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
  168. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
  169. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
  170. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
  171. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
  172. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
  173. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
  174. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
  175. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +246 -0
  176. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
  177. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +205 -0
  178. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
  179. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
  180. intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
  181. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
  182. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
  183. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
  184. intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +109 -0
  185. intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
  186. intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
  187. intel_npu_acceleration_library/external/openvino/utils.py +98 -0
  188. intel_npu_acceleration_library/functional/__init__.py +8 -0
  189. intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
  190. intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
  191. intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
  192. intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
  193. intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
  194. intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
  195. intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
  196. intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
  197. intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
  198. intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
  199. intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
  200. intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
  201. intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
  202. intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
  203. intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
  204. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
  205. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
  206. intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
  207. intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
  208. intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
  209. intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
  210. intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
  211. intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
  212. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
  213. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
  214. intel_npu_acceleration_library/modelling.py +150 -0
  215. intel_npu_acceleration_library/nn/__init__.py +20 -0
  216. intel_npu_acceleration_library/nn/autograd.py +68 -0
  217. intel_npu_acceleration_library/nn/conv.py +257 -0
  218. intel_npu_acceleration_library/nn/functional.py +1207 -0
  219. intel_npu_acceleration_library/nn/linear.py +162 -0
  220. intel_npu_acceleration_library/nn/llm.py +417 -0
  221. intel_npu_acceleration_library/nn/module.py +393 -0
  222. intel_npu_acceleration_library/optimizations.py +157 -0
  223. intel_npu_acceleration_library/quantization.py +174 -0
@@ -0,0 +1,256 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # flake8: noqa
5
+ # mypy: ignore-errors
6
+
7
+ import torch
8
+ import numpy as np
9
+
10
+ from openvino.runtime import op, Type as OVType, Shape, Tensor
11
+ from openvino.runtime import opset11 as ops
12
+
13
+
14
def make_constant(*args, **kwargs):
    """Create an OpenVINO ``op.Constant`` node; thin pass-through wrapper."""
    return op.Constant(*args, **kwargs)
16
+
17
+
18
def fetch_attr(self_module, target: str):
    """
    Resolve a dotted attribute path starting from ``self_module``.

    Args:
        target (str): The fully-qualified name of the attribute to fetch

    Return:
        Any: The value of the attribute.

    Raises:
        RuntimeError: if any component of the path does not exist.
    """
    parts = target.split(".")
    node = self_module
    for idx, part in enumerate(parts):
        if not hasattr(node, part):
            # Report the longest prefix that did resolve.
            raise RuntimeError(
                f"Node referenced nonexistent target {'.'.join(parts[:idx])}")
        node = getattr(node, part)
    return node
36
+
37
+
38
def get_type_from_py_type(value):
    """Map a Python scalar to the matching OpenVINO element type.

    Returns ``OVType.dynamic`` for anything that is not a float, bool or int.
    """
    # bool is checked before int because bool is a subclass of int in Python;
    # it is not a subclass of float, so placing it first is behavior-neutral.
    if isinstance(value, bool):
        return OVType.boolean
    if isinstance(value, float):
        return OVType.f32
    if isinstance(value, int):
        return OVType.i64
    return OVType.dynamic
46
+
47
+
48
def torch_tensor_to_ov_const(torch_t: torch.Tensor, shared_memory=True):
    """Convert a torch tensor into an OpenVINO ``op.Constant``.

    ``shared_memory=True`` lets the Constant alias the tensor's buffer
    instead of copying it.
    """
    contiguous = torch_t.contiguous()
    if contiguous.dtype != torch.bfloat16:
        return op.Constant(contiguous.numpy(force=True), shared_memory=shared_memory)
    # numpy has no bfloat16: reinterpret the raw bits as float16 so the data
    # can cross into numpy, then tag the OV tensor as bf16 to keep the type.
    as_f16 = contiguous.view(torch.float16)
    ov_tensor = Tensor(as_f16.numpy(force=True), as_f16.shape, OVType.bf16)
    return op.Constant(ov_tensor, shared_memory=shared_memory)
60
+
61
+
62
def ivalue_to_constant(ivalue, shared_memory=True):
    """Convert a TorchScript IValue (scalar, homogeneous list/tuple, or
    tensor) into the outputs of an OpenVINO Constant.

    Returns ``None`` when the value kind is not supported.
    """
    scalar_type = get_type_from_py_type(ivalue)
    if scalar_type.is_static():
        # Plain Python scalar -> 0-d constant.
        return op.Constant(scalar_type, Shape([]), [ivalue]).outputs()

    if isinstance(ivalue, (list, tuple)):
        assert len(ivalue) > 0, "Can't deduce type for empty list"
        # Element type is deduced from the first element only.
        element_type = get_type_from_py_type(ivalue[0])
        assert element_type.is_static(), "Can't deduce type for list"
        return op.Constant(element_type, Shape([len(ivalue)]), ivalue).outputs()

    if isinstance(ivalue, torch.Tensor):
        return torch_tensor_to_ov_const(ivalue, shared_memory=shared_memory).outputs()
    return None
76
+
77
+
78
def get_value_from_getattr(getattr_node, self_module):
    """Resolve a (possibly nested) chain of ``prim::GetAttr`` nodes.

    Returns a tuple of the resolved attribute value and its dotted path
    name rooted at ``"self"``.
    """
    assert getattr_node.kind() == "prim::GetAttr", "Got node of kind not equal to prim::GetAttr"
    # GetAttr nodes can be nested; collect the chain from the given node up
    # toward the root input.
    chain = []
    node = getattr_node
    while node.kind() == "prim::GetAttr":
        chain.append(node)
        node_inputs = list(node.inputs())
        if not node_inputs:
            break
        node = node_inputs[0].node()
    value = self_module
    path_name = "self"
    # Apply the attributes outermost-first (reverse of collection order).
    for link in reversed(chain):
        attr_name = link.s("name")
        assert hasattr(
            value, attr_name), f"No attribute with name \"{attr_name}\" found in module."
        path_name = f"{path_name}.{attr_name}"
        value = getattr(value, attr_name)
    return value, path_name
98
+
99
def graph_has_ops(graph, op_types:list) -> bool:
    """Return True if *graph* (or any block nested in its nodes) contains a
    node whose kind has any entry of *op_types* as a substring."""
    for node in graph.nodes():
        if any(op_kind in node.kind() for op_kind in op_types):
            return True
        # Recurse into nested blocks (e.g. bodies of prim::If / prim::Loop).
        if any(graph_has_ops(block, op_types) for block in node.blocks()):
            return True
    return False
109
+
110
+
111
# Lookup table from PyTorch type names (scalar kinds, dtypes and legacy
# tensor-class names, all keyed as strings) to OpenVINO element types.
# Quantized dtypes map to their underlying integer storage type.
pt_to_ov_type_map = {
    "float": OVType.f32,
    "int": OVType.i64,
    "bool": OVType.boolean,
    "torch.bfloat16": OVType.bf16,
    "torch.float16": OVType.f16,
    "torch.float32": OVType.f32,
    "torch.float64": OVType.f64,
    "torch.uint8": OVType.u8,
    "torch.int8": OVType.i8,
    "torch.int16": OVType.i16,
    "torch.int32": OVType.i32,
    "torch.int64": OVType.i64,
    "torch.bool": OVType.boolean,
    "torch.DoubleTensor": OVType.f64,
    "torch.FloatTensor": OVType.f32,
    "torch.HalfTensor": OVType.f16,
    "torch.BFloat16Tensor": OVType.bf16,
    "torch.IntTensor": OVType.i32,
    "torch.LongTensor": OVType.i64,
    "torch.ShortTensor": OVType.i16,
    "torch.CharTensor": OVType.i8,
    "torch.ByteTensor": OVType.u8,
    "torch.BoolTensor": OVType.boolean,
    "torch.quint8": OVType.u8,
    "torch.qint8": OVType.i8,
    "torch.qint32": OVType.i32
}
139
+
140
+
141
# Source template for a wrapper module that turns named (keyword) inputs into
# positional ones; ``{input_sign}`` / ``{example_input}`` are filled in by
# ``process_dict_inputs`` and the result is exec'ed to obtain ModelWrapper.
wrapper_template = """
import torch
from typing import *

class ModelWrapper(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, {input_sign}):
        return self.model({example_input})
"""
153
+
154
+
155
def process_dict_inputs(inputs, input_params, model):
    """Prepare dict-shaped example inputs for tracing.

    Args:
        inputs: mapping of parameter name -> example value.
        input_params: the model's signature parameters (ordered mapping,
            e.g. ``inspect.Signature.parameters``).
        model: the model to be traced.

    Returns:
        A tuple ``(example_inputs_dict, ordered_input_names, model)`` where
        the model may be replaced by a generated positional-argument wrapper.
    """
    # Keep only the signature parameters that were actually provided,
    # preserving signature order.
    ordered_inputs = []
    for input_name in input_params:
        if input_name in inputs:
            ordered_inputs.append(input_name)

    input_signature = list(input_params)
    # If the provided names form a prefix of the signature, the inputs can be
    # passed positionally as-is — no wrapper needed.
    if ordered_inputs == input_signature[:len(ordered_inputs)]:
        example_inputs = [inputs[input_name] for input_name in ordered_inputs]
        if all([isinstance(inp, torch.Tensor) for inp in example_inputs]):
            return {"example_inputs": [inputs[name] for name in ordered_inputs]}, ordered_inputs, model
        return {"example_inputs": example_inputs}, ordered_inputs, model

    # PyTorch has some difficulties to trace models with named unordered parameters:
    # torch < 2.0.0 supports only positional arguments for tracing
    # pytorch == 2.0.0 supports input kwargs tracing,
    # but does not support complex nested objects (e. g. tuple of tuples of tensors)
    # We will use wrapper for making them positional as workaround.

    input_sign_str = []
    input_params_str = []

    for input_name in ordered_inputs:
        if str(input_params[input_name].annotation).startswith("typing.Union"):
            # Drop Union members that are neither typing constructs, torch
            # types nor basic builtins — custom classes break tracing.
            # NOTE(review): this mutates the annotation's __args__ in place.
            filter_custom_args = []
            for arg in input_params[input_name].annotation.__args__:
                str_arg = str(arg)
                is_typing = str_arg.startswith("typing.")
                is_torch = "torch." in str_arg
                is_builten = str_arg in (str(int), str(float), str(type(None)))
                if not (is_typing or is_torch or is_builten):
                    continue
                filter_custom_args.append(arg)
            input_params[input_name].annotation.__args__ = tuple(
                filter_custom_args)
        input_sign_str.append(
            str(input_params[input_name]).replace("NoneType", "None"))
        input_params_str.append(f"{input_name}={input_name}")

    # Render and exec the wrapper class source (see wrapper_template above).
    wrapper_class = wrapper_template.format(input_sign=', '.join(
        input_sign_str), example_input=', '.join(input_params_str))
    result = {}
    try:
        exec(wrapper_class, result)

        wrapped_model = result["ModelWrapper"](model)
        wrapped_model.eval()
    # if wrapping failed, it is better to return original model for avoid user confusion regarding error message
    except Exception:
        wrapped_model = model

    return {"example_inputs": [inputs[name] for name in ordered_inputs]}, ordered_inputs, wrapped_model
207
+
208
+
209
def prepare_example_inputs_and_model(inputs, input_params, model):
    """Normalize example *inputs* for tracing.

    Returns a tuple ``(example_inputs_dict, input_signature, model,
    input_is_list)``.  Dict inputs are delegated to
    ``process_dict_inputs``; a single tensor (or one-element tensor list
    with a ``typing.List`` annotation) is wrapped appropriately.
    """
    signature = list(input_params)
    if isinstance(inputs, dict):
        examples, ordered, wrapped = process_dict_inputs(inputs, input_params, model)
        return examples, ordered, wrapped, False

    input_is_list = False
    single_tensor_list = (
        isinstance(inputs, list)
        and len(inputs) == 1
        and isinstance(inputs[0], torch.Tensor)
    )
    if single_tensor_list and "typing.List" in str(input_params[signature[0]].annotation):
        # A one-element tensor list annotated as List[...] is traced as a
        # batched tensor; the flag records that it must be unwrapped later.
        inputs = inputs[0].unsqueeze(0)
        input_is_list = True

    if isinstance(inputs, torch.Tensor):
        inputs = [inputs]
    return {"example_inputs": inputs}, signature[:len(inputs)], model, input_is_list
224
+
225
+
226
def convert_quantized_tensor(qtensor: torch.Tensor, shared_memory: bool):
    """Decompose a torch quantized tensor into OpenVINO ops.

    Represents the quantized tensor as
    Constant(int) -> Convert(f32) -> Subtract(zero_point) -> Multiply(scale)
    and returns the outputs of the final Multiply node.

    Args:
        qtensor: a quantized tensor with per-channel or per-tensor affine
            quantization scheme.
        shared_memory: forwarded to the Constant so it can alias the data.

    Raises:
        AssertionError: if the qscheme is neither ``torch.per_channel_affine``
            nor ``torch.per_tensor_affine``.
    """
    qscheme = qtensor.qscheme()
    if qscheme == torch.per_channel_affine:
        int8_tensor = qtensor.int_repr()
        scale = qtensor.q_per_channel_scales().numpy().astype(np.float32)
        zero_point = qtensor.q_per_channel_zero_points().numpy().astype(np.float32)
        axis = np.int32(qtensor.q_per_channel_axis())

        # Reshape scale/zero-point to broadcast along the quantization axis.
        new_shape = np.ones(len(int8_tensor.shape), dtype=np.int32)
        new_shape[axis] = -1
        zero_point_bc = np.reshape(zero_point, new_shape)
        scale_bc = np.reshape(scale, new_shape)

        int8_const = torch_tensor_to_ov_const(
            int8_tensor, shared_memory=shared_memory)
        convert = ops.convert(int8_const, np.float32)
        sub = ops.subtract(convert, zero_point_bc)
        return ops.multiply(sub, scale_bc).outputs()
    elif qscheme == torch.per_tensor_affine:
        int8_tensor = qtensor.int_repr()
        scale = np.float32(qtensor.q_scale())
        zero_point = np.float32(qtensor.q_zero_point())

        int8_const = torch_tensor_to_ov_const(
            int8_tensor, shared_memory=shared_memory)
        convert = ops.convert(int8_const, np.float32)
        sub = ops.subtract(convert, zero_point)
        return ops.multiply(sub, scale).outputs()
    # Was `assert False, ...`: asserts are stripped under `python -O`, which
    # would make this fall through and silently return None.  Raise the same
    # exception type explicitly so the error survives optimized runs.
    raise AssertionError("Unsupported qscheme")
@@ -0,0 +1,16 @@
1
# Copyright (C) 2018-2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

"""
Package: openvino
Low level wrappers for the FrontEnd C++ API.
"""

# flake8: noqa

try:
    from openvino.frontend.tensorflow.py_tensorflow_frontend import _FrontEndPyGraphIterator as GraphIterator
    from openvino.frontend.tensorflow.py_tensorflow_frontend import ConversionExtensionTensorflow as ConversionExtension
    from openvino.frontend.tensorflow.py_tensorflow_frontend import OpExtensionTensorflow as OpExtension
except ImportError as err:
    # Chain the original failure explicitly (`from err`) so the real cause
    # stays visible in the traceback instead of only the generic message.
    raise ImportError("OpenVINO Tensorflow frontend is not available, please make sure the frontend is built. " "{}".format(err)) from err
@@ -0,0 +1,116 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # flake8: noqa
5
+ # mypy: ignore-errors
6
+
7
+ import tensorflow as tf
8
+ from openvino.frontend.tensorflow.node_decoder import TFGraphNodeDecoder
9
+ from openvino.frontend.tensorflow.py_tensorflow_frontend import _FrontEndPyGraphIterator as GraphIterator
10
+
11
+
12
class GraphIteratorTFGraph(GraphIterator):
    """Python-side graph iterator that walks the operations of a ``tf.Graph``
    and exposes one :class:`TFGraphNodeDecoder` per node to the OpenVINO
    TensorFlow frontend."""

    def __init__(self, tf_graph: tf.Graph, share_weights: bool, inner_graph: bool = False,
                 input_names_map: dict = None, output_names_map: dict = None):
        GraphIterator.__init__(self)
        self.m_graph = tf_graph
        self.m_node_index = 0
        self.m_inner_graph = inner_graph
        self.m_share_weights = share_weights
        self.m_input_names_map = input_names_map or {}
        self.m_output_names_map = output_names_map or {}
        self.m_vars = None
        if hasattr(tf_graph, "variables"):
            # Keep a reference to the graph variables; otherwise Python could
            # release memory that is still accessed from the C++ bindings.
            self.m_vars = tf_graph.variables

        # One decoder per operation, in graph order.
        self.m_decoders = [TFGraphNodeDecoder(op, share_weights, inner_graph)
                           for op in tf_graph.get_operations()]

        # Body-graph iterators are created lazily in get_body_graph_iterator().
        self.m_iterators = {func_name: None for func_name in self.m_graph._functions}

    def get_input_names(self) -> list:
        """Return input names in their original order.

        Note: used only for the library functions (inner graphs); the outer
        graph always yields an empty list.
        """
        if not self.m_inner_graph:
            return []
        if hasattr(self.m_graph, 'inputs') and self.m_graph.inputs:
            return [tensor.name for tensor in self.m_graph.inputs]
        collected = []
        for node in self.m_graph.get_operations():
            if node.type != "Placeholder":
                continue
            assert isinstance(node, tf.Operation), "Unknown node type. Expected tf.Operation, got {}".format(type(node))
            assert hasattr(node, "node_def") and isinstance(node.node_def, tf.compat.v1.NodeDef), \
                "Could not find node_def in node {}".format(node.name)
            dtype_attr = node.node_def.attr["dtype"].type

            # Placeholders with type "resource" have exact values in "variables" field,
            # so they are passed to TF FE as constants.
            # For this reason they are not listed as model inputs.
            if tf.dtypes.DType(dtype_attr).name != "resource" or self.m_inner_graph:
                collected.append(node.name)
        return collected

    def get_output_names(self) -> list:
        """Return output names in their original order.

        Note: used only for the library functions (inner graphs); the outer
        graph always yields an empty list.
        """
        if not self.m_inner_graph:
            return []

        if hasattr(self.m_graph, 'outputs') and self.m_graph.outputs:
            return [tensor.name for tensor in self.m_graph.outputs]
        # If graph has no 'outputs' field, find nodes without outputs and consider them graph outputs.
        # The order of outputs is important and wrong order may lead to conversion error.
        consumed = set()
        for node in self.m_graph.get_operations():
            assert isinstance(node, tf.Operation), "Unknown node type. Expected tf.Operation, got {}".format(type(node))
            for producer in node.inputs:
                consumed.add(producer.op.name)

        forward_order = []
        for node in self.m_graph.get_operations():
            if node.name not in consumed:
                forward_order.extend(out.name for out in node.outputs)
        # The original accumulation prepended each name one by one, which is
        # exactly the reverse of the forward collection order.
        return list(reversed(forward_order))

    def get_input_names_map(self) -> dict:
        """Map from user-defined external tensor names to internal input names."""
        return self.m_input_names_map

    def get_output_names_map(self) -> dict:
        """Map from user-defined external tensor names to internal output names."""
        return self.m_output_names_map

    def is_end(self) -> bool:
        """True once the iterator has moved past the last decoder."""
        return self.m_node_index >= len(self.m_decoders)

    def reset(self):
        """Rewind the iterator to the first operation."""
        self.m_node_index = 0

    def size(self) -> int:
        """Number of operations (decoders) in the graph."""
        return len(self.m_decoders)

    def next_impl(self):
        """Advance the iterator by one operation."""
        self.m_node_index += 1

    def get_decoder(self):
        """Decoder of the operation at the current position."""
        return self.m_decoders[self.m_node_index]

    def get_body_graph_iterator(self, func_name):
        """Lazily create and cache an iterator over the body graph of *func_name*.

        Returns ``None`` for names that are not library functions of this graph.
        """
        if func_name not in self.m_iterators:
            return None
        if self.m_iterators[func_name] is None:
            body_graph = self.m_graph._functions[func_name].graph
            self.m_iterators[func_name] = GraphIteratorTFGraph(body_graph,
                                                               self.m_share_weights,
                                                               True)
        return self.m_iterators[func_name]
@@ -0,0 +1,219 @@
1
+ # Copyright (C) 2018-2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # flake8: noqa
5
+ # mypy: ignore-errors
6
+
7
+ import numpy as np
8
+ import tensorflow as tf
9
+ from openvino.frontend.tensorflow.py_tensorflow_frontend import _FrontEndDecoderBase as DecoderBase
10
+ from openvino.runtime import PartialShape, Type, OVAny, Tensor
11
+
12
+
13
def tf_type_to_ov_type(tf_type_int):
    """Convert a TensorFlow dtype enum value to an OpenVINO ``Type``.

    :param tf_type_int: integer dtype enum as stored in a NodeDef attribute.
    :return: matching OpenVINO ``Type``; ``Type.dynamic`` for "variant",
             ``Type.string`` for "string", ``Type.undefined`` when the numpy
             dtype has no OpenVINO counterpart.
    """
    tf_type = tf.dtypes.as_dtype(tf_type_int)
    if tf_type.name == "variant":
        return Type.dynamic
    if tf_type.name == "string":
        return Type.string
    numpy_type = tf_type.as_numpy_dtype
    try:
        ret_type = Type(numpy_type)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Only conversion failures should map to `undefined`.
        ret_type = Type.undefined
    return ret_type
25
+
26
+
27
def tf_attr_to_numpy(attr):
    """Extract the value held by a TF ``AttrValue`` proto as a plain
    Python/numpy object.

    The oneof field set on *attr* decides the conversion; unknown kinds fall
    back to returning the raw proto field.
    """
    value_kind = attr.WhichOneof("value")
    # Field semantics described in
    # https://www.tensorflow.org/api_docs/python/tf/compat/v1/AttrValue
    if value_kind is None:
        return None
    if value_kind == "func":
        return attr.func.name
    if value_kind == "s":
        try:
            return attr.s.decode("utf-8")
        except UnicodeDecodeError:
            # Not valid UTF-8 - hand back the raw bytes.
            return attr.s
    if value_kind == "f":
        return np.float32(attr.f)
    if value_kind == "type":
        return tf_type_to_ov_type(attr.type)
    if value_kind == "list":
        fields = attr.list.ListFields()
        if fields and len(fields[0]) > 1:
            return list(fields[0][1])
        return None
    if value_kind == "shape":
        proto_shape = attr.shape
        if proto_shape.unknown_rank:
            return PartialShape.dynamic()
        return PartialShape([dim.size for dim in proto_shape.dim])
    # Scalar kinds such as "i" (int) and "b" (bool): return the field as-is.
    return getattr(attr, value_kind)
58
+
59
+
60
def tf_attr_to_ov(attr):
    """Wrap the extracted TF attribute value into an ``OVAny``."""
    plain_value = tf_attr_to_numpy(attr)
    return OVAny(plain_value)
62
+
63
+
64
class TFGraphNodeDecoder(DecoderBase):
    """Decoder for a single ``tf.Operation``, exposing its name, type,
    attributes and input links to the OpenVINO TensorFlow frontend.

    Const nodes and resource Placeholders backed by variables get their
    tensor value pre-parsed into ``m_parsed_content`` at construction time.
    """

    def __init__(self, operation: tf.Operation, share_weights: bool, inner_graph: bool):
        DecoderBase.__init__(self)
        assert isinstance(operation, tf.Operation), "Unknown operation type. " \
                                                    "Expected tf.Operation, got {}".format(type(operation))
        self.m_operation = operation
        self.m_inner_graph = inner_graph
        # Set only for Const/Placeholder nodes; dtype name string (e.g. "float32").
        self.m_data_type = None
        # Pre-parsed tensor value for Const nodes and variable-backed Placeholders.
        self.m_parsed_content = None

        # Copies value from inner buffer of TF_Operation to NodeDef class.
        self.m_node_def = self.m_operation.node_def
        self.m_shared_memory = share_weights

        if self.m_operation.type == "Const":
            self.m_data_type = tf.dtypes.DType(self.m_node_def.attr["dtype"].type).name

            # Copies tensor value to parsed TensorProto
            value = self.m_node_def.attr["value"].tensor

            # As the tensor was copied, shared memory may be lost
            # after destruction of GraphIteratorTFGraph() when convert_model() finishes its work.
            # To prevent it we need to turn off sharing.
            self.m_shared_memory = False

            if self.m_data_type == "string":
                self.m_parsed_content = [str(val) for val in value.string_val]
            else:
                if value.tensor_content:
                    shape = [d.size for d in value.tensor_shape.dim]
                    tensor_dtype = tf.dtypes.as_dtype(value.dtype)
                    dtype = tensor_dtype.as_numpy_dtype
                    # no copy of content
                    self.m_parsed_content = (np.frombuffer(value.tensor_content,
                                                           dtype=dtype).reshape(shape))
                else:
                    # TODO: remove copy of content for cases when tensor value is not in tensor_content field, ticket: 114797
                    self.m_parsed_content = tf.make_ndarray(value)

        if self.m_operation.type == "Placeholder":
            self.m_data_type = tf.dtypes.DType(self.m_node_def.attr["dtype"].type).name

            if not self.m_inner_graph:
                variable_value = TFGraphNodeDecoder.get_variable(self.m_operation)
                if variable_value is not None:
                    # Disable sharing for variables which are not on CPU
                    if "device:CPU" not in variable_value.device:
                        self.m_shared_memory = False
                    # does not copy data
                    self.m_parsed_content = variable_value.__array__()

                    if isinstance(self.m_parsed_content, bytes):
                        self.m_data_type = "string"
                        self.m_parsed_content = [str(self.m_parsed_content)]

    def get_op_name(self) -> str:
        """Name of the wrapped operation."""
        return self.m_operation.name

    def get_op_type(self) -> str:
        """Operation type; variable-backed Placeholders are reported as "Const"."""
        if self.m_operation.type == "Placeholder":
            type_attr = tf.dtypes.DType(self.m_node_def.attr["dtype"].type)
            if not self.m_inner_graph and self.m_parsed_content is not None:
                if TFGraphNodeDecoder.get_variable(self.m_operation) is not None:
                    return "Const"
                raise Exception("Could not get variable for resource Placeholder {0}".format(self.m_operation.name))
        return self.m_operation.type

    @staticmethod
    def get_variable(operation):
        """Return the variable value captured by *operation*'s first output,
        or None when the graph has no captures / no matching variable.
        Non-resource captures are returned as the captured tensor itself."""
        tf_graph = operation.graph
        if not hasattr(tf_graph, "captures"):
            return None
        for var_tensor, op_tensor in tf_graph.captures:
            if operation.outputs[0].name == op_tensor.name:
                if var_tensor.dtype.name != 'resource':
                    return var_tensor
                # Match the resource handle to a concrete variable by identity.
                for variable_value in operation.graph.variables:
                    if id(variable_value.handle) == id(var_tensor):
                        return variable_value.read_value_no_copy()
                return None
        return None

    def get_attribute(self, name):
        """Return the attribute *name* wrapped in OVAny.

        "shape"/"_output_shapes", "dtype" and "value" get special handling
        (resource Placeholders resolve through their backing variable);
        everything else goes through tf_attr_to_ov().
        """
        if name == "shape" or name == "_output_shapes":
            if self.m_node_def.attr["shape"].shape.unknown_rank:
                return OVAny(PartialShape.dynamic())
            shape_dims = self.m_node_def.attr["shape"].shape.dim
            shape = [dim.size for dim in shape_dims]
            type_num = self.m_node_def.attr["dtype"].type
            if type_num is not None and tf.dtypes.DType(type_num).name == "resource":
                if self.m_inner_graph:
                    return OVAny(PartialShape.dynamic())
                variable_value = TFGraphNodeDecoder.get_variable(self.m_operation)
                if variable_value is None:
                    # variable can be not found if this is Hash table
                    return OVAny(PartialShape.dynamic())
                return OVAny(PartialShape(list(variable_value.shape)))
            return OVAny(PartialShape(shape))
        if name == "dtype":
            type_num = self.m_node_def.attr["dtype"].type
            if tf.dtypes.DType(type_num).name == "resource":
                if not self.m_inner_graph:
                    variable_value = TFGraphNodeDecoder.get_variable(self.m_operation)
                    if variable_value is None:
                        # variable can be not found if this is Hash table
                        return OVAny(Type.dynamic)
                    return OVAny(tf_type_to_ov_type(variable_value.dtype))
                else:
                    return OVAny(Type.undefined)
            return OVAny(tf_type_to_ov_type(type_num))

        if name == "value":
            if self.m_data_type == 'string':
                return OVAny(Tensor(self.m_parsed_content))
            if self.m_parsed_content.size == 1:
                if isinstance(self.m_parsed_content, np.ndarray):
                    return OVAny(Tensor(self.m_parsed_content))
                # Scalars that are not ndarrays are wrapped once and cached back.
                self.m_parsed_content = np.array(self.m_parsed_content)
                return OVAny(Tensor(self.m_parsed_content))
            # Multi-element tensors may share memory with the TF buffer
            # depending on how m_shared_memory was resolved in __init__.
            ov_tensor = Tensor(self.m_parsed_content, shared_memory=self.m_shared_memory)
            ov_tensor = OVAny(ov_tensor)
            return ov_tensor
        attr_value = self.m_node_def.attr[name]

        return tf_attr_to_ov(attr_value)

    def get_input_size(self) -> int:
        """Number of inputs of the wrapped operation."""
        return len(self.m_operation.inputs)

    def get_input_node_name(self, input_port_idx):
        """Name of the producer operation feeding input *input_port_idx*."""
        assert input_port_idx >= 0, "Got negative input node index."
        assert input_port_idx < len(self.m_operation.inputs), "Input node index is out of range. Got {}, " \
                                                              "when number of input nodes {}.".format(input_port_idx,
                                                                                                      len(self.m_operation.inputs))
        return self.m_operation.inputs[input_port_idx].op.name

    def get_input_node_name_output_port_index(self, input_port_idx):
        """Output-port index encoded after the last ':' in the producer tensor
        name ("node:1" -> 1); defaults to 0 when absent or non-numeric."""
        tensor_name = self.m_operation.inputs[input_port_idx].name
        if ":" in tensor_name:
            port_idx_str = tensor_name[tensor_name.rfind(":") + 1:len(tensor_name)]
            if port_idx_str.isdigit():
                return int(port_idx_str)
            else:
                return 0
        return 0

    def get_input_node_name_output_port_name(self, input_port_idx):
        """Output-port name of the producer tensor: the segment between the
        first and last ':' ("node:out:0" -> "out"); "" when there is none."""
        tensor_name = self.m_operation.inputs[input_port_idx].name
        if ":" not in tensor_name:
            return ""
        first_col_idx = tensor_name.find(":")
        last_col_idx = tensor_name.rfind(":")
        if first_col_idx == last_col_idx:
            return ""

        return tensor_name[first_col_idx + 1: last_col_idx]