bigdl-core-npu 2.6.0b20250114-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (234)
  1. bigdl-core-npu/__init__.py +0 -0
  2. bigdl-core-npu/include/common.h +96 -0
  3. bigdl-core-npu/include/npu_llm.h +74 -0
  4. bigdl-core-npu/npu_llm.dll +0 -0
  5. bigdl-core-npu/npu_llm.lib +0 -0
  6. bigdl_core_npu-2.6.0b20250114.dist-info/METADATA +44 -0
  7. bigdl_core_npu-2.6.0b20250114.dist-info/RECORD +234 -0
  8. bigdl_core_npu-2.6.0b20250114.dist-info/WHEEL +5 -0
  9. bigdl_core_npu-2.6.0b20250114.dist-info/top_level.txt +2 -0
  10. intel_npu_acceleration_library/__init__.py +24 -0
  11. intel_npu_acceleration_library/_version.py +6 -0
  12. intel_npu_acceleration_library/backend/__init__.py +37 -0
  13. intel_npu_acceleration_library/backend/base.py +250 -0
  14. intel_npu_acceleration_library/backend/bindings.py +383 -0
  15. intel_npu_acceleration_library/backend/compression.py +24 -0
  16. intel_npu_acceleration_library/backend/convolution.py +58 -0
  17. intel_npu_acceleration_library/backend/factory.py +1161 -0
  18. intel_npu_acceleration_library/backend/linear.py +60 -0
  19. intel_npu_acceleration_library/backend/matmul.py +59 -0
  20. intel_npu_acceleration_library/backend/mlp.py +58 -0
  21. intel_npu_acceleration_library/backend/ops.py +142 -0
  22. intel_npu_acceleration_library/backend/qlinear.py +75 -0
  23. intel_npu_acceleration_library/backend/qmatmul.py +66 -0
  24. intel_npu_acceleration_library/backend/runtime.py +215 -0
  25. intel_npu_acceleration_library/backend/sdpa.py +107 -0
  26. intel_npu_acceleration_library/backend/tensor.py +1120 -0
  27. intel_npu_acceleration_library/backend/utils.py +70 -0
  28. intel_npu_acceleration_library/compiler.py +194 -0
  29. intel_npu_acceleration_library/device.py +230 -0
  30. intel_npu_acceleration_library/dtypes.py +155 -0
  31. intel_npu_acceleration_library/external/openvino/__init__.py +72 -0
  32. intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +21 -0
  33. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
  34. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
  35. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
  36. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
  37. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
  38. intel_npu_acceleration_library/external/openvino/experimental/__init__.py +14 -0
  39. intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
  40. intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
  41. intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
  42. intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +293 -0
  43. intel_npu_acceleration_library/external/openvino/frontend/jax/passes.py +65 -0
  44. intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +182 -0
  45. intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
  46. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
  47. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
  48. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
  49. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
  50. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
  51. intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
  52. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
  53. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
  54. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
  55. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
  56. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
  57. intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
  58. intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +370 -0
  59. intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +180 -0
  60. intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
  61. intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +118 -0
  62. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
  63. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
  64. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
  65. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
  66. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
  67. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +131 -0
  68. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
  69. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
  70. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
  71. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
  72. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +290 -0
  73. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +126 -0
  74. intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +568 -0
  75. intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +258 -0
  76. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
  77. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
  78. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
  79. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
  80. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
  81. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
  82. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
  83. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
  84. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +481 -0
  85. intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
  86. intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
  87. intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
  88. intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +28 -0
  89. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
  90. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
  91. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +5 -0
  92. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
  93. intel_npu_acceleration_library/external/openvino/properties/__init__.py +22 -0
  94. intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
  95. intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
  96. intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
  97. intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
  98. intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
  99. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
  100. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
  101. intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
  102. intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
  103. intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
  104. intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
  105. intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
  106. intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +19 -0
  107. intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
  108. intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
  109. intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3068 -0
  110. intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
  111. intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
  112. intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
  113. intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
  114. intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
  115. intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
  116. intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
  117. intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +398 -0
  118. intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
  119. intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
  120. intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +17 -0
  121. intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +276 -0
  122. intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
  123. intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
  124. intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
  125. intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
  126. intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
  127. intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
  128. intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
  129. intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
  130. intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
  131. intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +215 -0
  132. intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
  133. intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
  134. intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
  135. intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +787 -0
  136. intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
  137. intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
  138. intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
  139. intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
  140. intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
  141. intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
  142. intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +40 -0
  143. intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
  144. intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
  145. intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
  146. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
  147. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +447 -0
  148. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
  149. intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +156 -0
  150. intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
  151. intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
  152. intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
  153. intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
  154. intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
  155. intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
  156. intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
  157. intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
  158. intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
  159. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
  160. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
  161. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
  162. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
  163. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
  164. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
  165. intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
  166. intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
  167. intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
  168. intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
  169. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
  170. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +550 -0
  171. intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
  172. intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
  173. intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
  174. intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
  175. intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
  176. intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +40 -0
  177. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
  178. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
  179. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
  180. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
  181. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
  182. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
  183. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
  184. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
  185. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
  186. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +298 -0
  187. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
  188. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +214 -0
  189. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
  190. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
  191. intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
  192. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
  193. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
  194. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
  195. intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +196 -0
  196. intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
  197. intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
  198. intel_npu_acceleration_library/external/openvino/utils.py +115 -0
  199. intel_npu_acceleration_library/functional/__init__.py +8 -0
  200. intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
  201. intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
  202. intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
  203. intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
  204. intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
  205. intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
  206. intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
  207. intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
  208. intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
  209. intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
  210. intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
  211. intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
  212. intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
  213. intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
  214. intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
  215. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
  216. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
  217. intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
  218. intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
  219. intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
  220. intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
  221. intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
  222. intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
  223. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
  224. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
  225. intel_npu_acceleration_library/modelling.py +150 -0
  226. intel_npu_acceleration_library/nn/__init__.py +20 -0
  227. intel_npu_acceleration_library/nn/autograd.py +68 -0
  228. intel_npu_acceleration_library/nn/conv.py +257 -0
  229. intel_npu_acceleration_library/nn/functional.py +1207 -0
  230. intel_npu_acceleration_library/nn/linear.py +162 -0
  231. intel_npu_acceleration_library/nn/llm.py +417 -0
  232. intel_npu_acceleration_library/nn/module.py +393 -0
  233. intel_npu_acceleration_library/optimizations.py +157 -0
  234. intel_npu_acceleration_library/quantization.py +174 -0
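
Since a .whl is a plain zip archive, the file listing above can be reproduced locally. Below is a minimal sketch using only the Python standard library; the wheel filename is assumed to match the one in the page title and to be present in the current directory.

import zipfile

# A wheel is a zip archive; namelist() yields every path stored inside it.
wheel_path = "bigdl_core_npu-2.6.0b20250114-cp311-cp311-win_amd64.whl"  # assumed local filename
with zipfile.ZipFile(wheel_path) as whl:
    for name in whl.namelist():
        print(name)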
intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py
@@ -0,0 +1,182 @@
+ # Copyright (C) 2018-2024 Intel Corporation
+ # SPDX-License-Identifier: Apache-2.0
+
+ # flake8: noqa
+ # mypy: ignore-errors
+
+ import jax
+ import jax.numpy as jnp
+ import numpy as np
+ from openvino.frontend.jax.passes import filter_element, filter_ivalue, filter_param
+ from openvino.runtime import op, Type as OVType, Shape, OVAny
+
+ numpy_to_ov_type_map = {
+     np.float32: OVType.f32,
+     bool: OVType.boolean,
+     jax.dtypes.bfloat16: OVType.bf16,  # TODO: check this
+     np.float16: OVType.f16,
+     np.float32: OVType.f32,
+     np.float64: OVType.f64,
+     np.uint8: OVType.u8,
+     np.int8: OVType.i8,
+     np.uint16: OVType.u16,
+     np.int16: OVType.i16,
+     np.uint32: OVType.u32,
+     np.int32: OVType.i32,
+     np.uint64: OVType.u64,
+     np.int64: OVType.i64,
+ }
+
+ jax_to_ov_type_map = {
+     jnp.float32: OVType.f32,
+     jnp.bfloat16: OVType.bf16,  # TODO: check this
+     jnp.float16: OVType.f16,
+     jnp.float64: OVType.f64,
+     jnp.uint8: OVType.u8,
+     jnp.int8: OVType.i8,
+     jnp.uint16: OVType.u16,
+     jnp.int16: OVType.i16,
+     jnp.uint32: OVType.u32,
+     jnp.int32: OVType.i32,
+     jnp.uint64: OVType.u64,
+     jnp.int64: OVType.i64,
+ }
+
+ try:
+     jax_to_ov_type_map[jnp.bool] = OVType.boolean
+ except:
+     pass
+
+ basic_to_ov_type_map = {
+     int: OVType.i64,
+     float: OVType.f32,
+     bool: OVType.boolean,
+ }
+
+ ov_type_to_int_map = {
+     OVType.u8: 0,
+     OVType.i8: 1,
+     OVType.i16: 2,
+     OVType.i32: 3,
+     OVType.i64: 4,
+     OVType.f16: 5,
+     OVType.f32: 6,
+     OVType.f64: 7,
+     OVType.u16: 8,
+     OVType.u32: 9,
+     OVType.u64: 10,
+     OVType.boolean: 11,
+     OVType.bf16: 15,
+ }
+
+
+ def get_type_from_py_type(value):
+     if isinstance(value, float):
+         return OVType.f32
+     if isinstance(value, bool):
+         return OVType.boolean
+     if isinstance(value, int):
+         return OVType.i64
+     return OVType.dynamic
+
+
+ def get_type_from_np_type(value):
+     for np_dtype, ov_type in numpy_to_ov_type_map.items():
+         if isinstance(value, np_dtype):
+             return ov_type
+     return None
+
+
+ def _get_ov_type_from_value(value):
+     ov_type = get_type_from_np_type(value)
+     if ov_type is None:
+         ov_type = get_type_from_py_type(value)
+     return ov_type
+
+
+ def get_ov_type_for_value(value):
+     if isinstance(value, (jax.core.Var, jax.core.Literal)):
+         if value.aval.dtype in jax_to_ov_type_map:
+             return OVAny(jax_to_ov_type_map[value.aval.dtype])
+         for k, v in numpy_to_ov_type_map.items():
+             if value.aval.dtype == k:
+                 return OVAny(v)
+         for k, v in basic_to_ov_type_map.items():
+             if isinstance(value.aval.dtype, k):
+                 return OVAny(v)
+     elif isinstance(value, (int, float, bool)):
+         return OVAny(jax_to_ov_type_map[type(value)])
+     else:
+         raise NotImplementedError(f"dtype for {value} of type {type(value)} has not been supported yet.")
+
+
+ def get_ov_type_from_jax_type(dtype):
+     if dtype in jax_to_ov_type_map:
+         return OVAny(jax_to_ov_type_map[dtype])
+     for k, v in numpy_to_ov_type_map.items():
+         if dtype == k:
+             return OVAny(v)
+     for k, v in basic_to_ov_type_map.items():
+         if isinstance(dtype, k):
+             return OVAny(v)
+     return None
+
+
+ def jax_array_to_ov_const(arr: np.ndarray, shared_memory=True):
+     # TODO: deal with bfloat16 dtype here.
+     if isinstance(arr, np.ndarray):
+         return op.Constant(arr, shared_memory=shared_memory)
+     elif isinstance(arr, jax.Array):
+         return op.Constant(np.array(jax.device_get(arr)), shared_memory=shared_memory)
+     else:
+         raise ValueError(f"Constant is expected to be a numpy array or jax array but got {type(arr)}")
+
+
+ def ivalue_to_constant(ivalue, shared_memory=True):
+     '''
+     Convert a python object to an openvino constant.
+     '''
+     # print('ivalue = ', ivalue)
+     ivalue = filter_ivalue(ivalue)
+     ov_type = _get_ov_type_from_value(ivalue)
+     if ov_type.is_static():
+         return op.Constant(ov_type, Shape([]), [ivalue]).outputs()
+     if isinstance(ivalue, (list, tuple)):
+         assert len(ivalue) > 0, "Can't deduce type for empty list"
+         if isinstance(ivalue[0], (list, tuple)):
+             second_len = len(ivalue[0])
+             flattened_ivalue = []
+             for value in ivalue:
+                 assert isinstance(value, (list, tuple)), "Can't deduce type for a list with both list and basic types."
+                 assert len(value) == second_len or len(value) == 0, "Can't deduce type for nested list with different lengths."
+                 flattened_ivalue.extend([filter_element(item) for item in value])
+             flattened_ivalue = [item for sublist in ivalue for item in sublist]
+             ov_type = _get_ov_type_from_value(flattened_ivalue[0])
+             assert ov_type.is_static(), f"Can't deduce type {flattened_ivalue[0].__class__} for list"
+             return op.Constant(ov_type, Shape([len(ivalue), second_len]), flattened_ivalue).outputs()
+         ivalue = [filter_element(item) for item in ivalue]
+         ov_type = _get_ov_type_from_value(ivalue[0])
+         try:
+             assert ov_type.is_static(), f"Can't deduce type {ivalue[0].__class__} for list"
+         except:
+             # TODO 150596: remove this workaround
+             ivalue = [0]
+             ov_type = OVType.f32
+         return op.Constant(ov_type, Shape([len(ivalue)]), ivalue).outputs()
+
+     if isinstance(ivalue, (jax.Array, np.ndarray)):
+         return jax_array_to_ov_const(ivalue, shared_memory=shared_memory).outputs()
+
+     ov_dtype_value = get_ov_type_from_jax_type(ivalue)
+     if ov_dtype_value is not None:
+         return op.Constant(OVType.i64, Shape([]), [ov_type_to_int_map[ov_dtype_value]]).outputs()
+
+     return None
+
+
+ def param_to_constants(primitive: str, param_name: str, jaxpr, shared_memory=True):
+     processed_params = filter_param(primitive, param_name, jaxpr)
+
+     for k, v in processed_params.items():
+         processed_params[k] = ivalue_to_constant(v, shared_memory=shared_memory)
+     return processed_params
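
The helpers above map numpy/JAX dtypes to OpenVINO types and wrap Python values into OpenVINO constants for the JAX frontend. A rough usage sketch follows (not part of the diff; it assumes the bundled module is importable as openvino.frontend.jax.utils, as its own imports suggest, and that jax and numpy are installed).

import numpy as np
from openvino.frontend.jax.utils import ivalue_to_constant

# A homogeneous Python list becomes a 1-D i64 Constant (outputs() of an op.Constant).
const_outputs = ivalue_to_constant([1, 2, 3])

# A numpy array is wrapped directly into an op.Constant, sharing memory by default.
const_outputs = ivalue_to_constant(np.ones((2, 2), dtype=np.float32))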
intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py
@@ -0,0 +1,15 @@
+ # Copyright (C) 2018-2024 Intel Corporation
+ # SPDX-License-Identifier: Apache-2.0
+
+ """
+ Package: openvino
+ Low level wrappers for the FrontEnd C++ API.
+ """
+
+ # flake8: noqa
+
+ try:
+     from openvino.frontend.onnx.py_onnx_frontend import ConversionExtensionONNX as ConversionExtension
+     from openvino.frontend.onnx.py_onnx_frontend import OpExtensionONNX as OpExtension
+ except ImportError as err:
+     raise ImportError("OpenVINO ONNX frontend is not available, please make sure the frontend is built. " "{}".format(err))
intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py
@@ -0,0 +1,15 @@
+ # Copyright (C) 2018-2024 Intel Corporation
+ # SPDX-License-Identifier: Apache-2.0
+
+ """
+ Package: openvino
+ Low level wrappers for the FrontEnd C++ API.
+ """
+
+ # flake8: noqa
+
+ try:
+     from openvino.frontend.paddle.py_paddle_frontend import ConversionExtensionPaddle as ConversionExtension
+     from openvino.frontend.paddle.py_paddle_frontend import OpExtensionPaddle as OpExtension
+ except ImportError as err:
+     raise ImportError("OpenVINO Paddle frontend is not available, please make sure the frontend is built." "{}".format(err))
intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py
@@ -0,0 +1,19 @@
+ # Copyright (C) 2018-2024 Intel Corporation
+ # SPDX-License-Identifier: Apache-2.0
+
+ """
+ Package: openvino
+ Low level wrappers for the FrontEnd C++ API.
+ """
+
+ # flake8: noqa
+
+ try:
+     from openvino.frontend.pytorch.py_pytorch_frontend import _FrontEndPytorchDecoder as Decoder
+     from openvino.frontend.pytorch.py_pytorch_frontend import _Type as DecoderType
+     from openvino.frontend.pytorch.py_pytorch_frontend import ConversionExtensionPytorch as ConversionExtension
+     from openvino.frontend.pytorch.py_pytorch_frontend import OpExtensionPytorch as OpExtension
+     from openvino.frontend.pytorch.module_extension import ModuleExtension
+ except ImportError as err:
+     raise ImportError("OpenVINO PyTorch frontend is not available, please make sure the frontend is built."
+                       "{}".format(err))
intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py
@@ -0,0 +1,370 @@
+ # Copyright (C) 2018-2024 Intel Corporation
+ # SPDX-License-Identifier: Apache-2.0
+
+ # flake8: noqa
+ # mypy: ignore-errors
+
+ from openvino.frontend.pytorch.py_pytorch_frontend import _FrontEndPytorchDecoder as Decoder
+ from openvino.frontend.pytorch.py_pytorch_frontend import _Type as DecoderType
+ from openvino.runtime import op, PartialShape, Type as OVType, OVAny, Shape
+ from openvino.frontend.pytorch.utils import make_constant, fetch_attr, pt_to_ov_type_map, torch_tensor_to_ov_const
+
+ import torch
+
+ import logging
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.WARNING)
+
+
+ class InlinedInput:
+     def __init__(self, data) -> None:
+         self.data = data
+
+
+ class TorchFXPythonDecoder (Decoder):
+
+     def __init__(self, pt_module, fx_gm=None, nodes=None, mark_node_callback=None, input_shapes=[], input_types=[]):
+         Decoder.__init__(self)
+         self.mark_node_callback = mark_node_callback
+         # We store every decoder created by this decoder so that all them are not deleted until the first decoder is deleted
+         self.m_decoders = []
+         self.pt_module = pt_module
+         self.fx_gm = fx_gm if fx_gm is not None else pt_module
+         self.input_types = [OVAny(pt_to_ov_type_map[str(t)])
+                             for t in input_types]
+         self.input_shapes = input_shapes
+
+         self._input_signature = []
+         self._example_input = None
+
+         if issubclass(type(pt_module), torch.fx.graph_module.GraphModule):
+
+             self._input_is_list = None
+             self._nodes = list(pt_module.graph.nodes)
+             self._inputs = []
+             self._outputs = []
+             found_types = []
+             found_shapes = []
+             for i in range(len(self._nodes)):
+                 if self._nodes[i].op == 'placeholder':
+                     self._inputs.append(i)
+                     value = self._nodes[i]
+                     self._input_signature.append(value.name)
+                     if hasattr(value, "meta") and ('tensor_meta' in value.meta.keys()) and value.meta['tensor_meta']:
+                         found_shapes.append(value.meta['tensor_meta'].shape)
+                         found_types.append(
+                             OVAny(pt_to_ov_type_map[str(value.meta['tensor_meta'].dtype)]))
+                     else:
+                         found_shapes.append(None)
+                         found_types.append(None)
+                 elif self._nodes[i].op == 'output':
+                     # Instead of putting output index, refer to its target
+                     uargs = self.unpack_containers(self._nodes[i].args)
+                     self._outputs = [(arg[0], self._nodes.index(arg[1]))
+                                      for arg in uargs if arg[1] is not None]
+             for idx, shape in enumerate(found_shapes):
+                 if shape is not None:
+                     new_shape = []
+                     for dim in range(0, len(shape)):
+                         if (type(shape[dim]).__name__ == "SymInt"):
+                             new_shape.append(-1)
+                         else:
+                             new_shape.append(shape[dim])
+                     found_shapes[idx] = torch.Size(new_shape)
+
+             if not input_shapes or len(input_shapes) == 0:
+                 self.input_shapes = found_shapes
+             if not input_types or len(input_types) == 0:
+                 self.input_types = found_types
+
+         elif issubclass(type(pt_module), torch.fx.Node):
+
+             self._nodes = nodes  # passed from outer context
+
+             # FIXME: Quadratic complexity nodes*nodes considering the outer loop over all nodes
+             self._outputs = [("", self._nodes.index(pt_module))]
+
+             # None in inputs mean the input is inlined or None (also considered inlined)
+             self._inputs = [self._nodes.index(
+                 arg) if arg in self._nodes else InlinedInput(arg) for arg in pt_module.args]
+
+             # FIXME: Find a better way to pass nested tuples to OV frontend. This is a temporary solution to flatten arguments.
+             new_inputs = []
+             self.input_types = []
+             for i in range(len(pt_module.args)):
+                 if isinstance(pt_module.args[i], (list, tuple)) and any([isinstance(a, torch.fx.Node) for a in pt_module.args[i]]):
+                     for arg in pt_module.args[i]:
+                         if arg in self._nodes:
+                             new_inputs.append(self._nodes.index(arg))
+                         else:
+                             new_inputs.append(InlinedInput(arg))
+                         self.input_types.append(OVAny(DecoderType.List(
+                             TorchFXPythonDecoder.get_type_for_value(arg))))
+                 else:
+                     v = self._inputs[i]
+                     new_inputs.append(v)
+                     self.input_types.append(
+                         TorchFXPythonDecoder.get_type_for_value(v.data if isinstance(v, InlinedInput) else self._nodes[v]))
+             self._inputs = new_inputs
+
+     def inputs(self):
+         # Consider 0 a special case which may mean the input is inlined, but not guaranteed
+         return [x if not isinstance(x, InlinedInput) else 0 for x in self._inputs]
+
+     def is_input_inlined(self, index):
+         return isinstance(self._inputs[index], InlinedInput)
+
+     @staticmethod
+     def unpack_containers(arg):
+         if isinstance(arg, (tuple, list)):
+             res = []
+             for e in arg:
+                 res.extend(TorchFXPythonDecoder.unpack_containers(e))
+             return res
+         elif isinstance(arg, dict):
+             res = []
+             for k, e in arg.items():
+                 unpacked = TorchFXPythonDecoder.unpack_containers(e)
+                 if len(unpacked) == 1:
+                     unpacked[0] = (k, unpacked[0][1])
+                 res.extend(unpacked)
+             return res
+         else:
+             return [("", arg)]
+
+     @staticmethod
+     def arg_to_constant(arg):
+         if isinstance(arg, list):
+             if len(arg) > 0:
+                 return make_constant(pt_to_ov_type_map[type(
+                     arg[0]).__name__], Shape([len(arg)]), arg)
+             else:
+                 # TODO: which type should we use if list is empty? Need a signaling value here
+                 return make_constant(OVType.i32, Shape([0]), [])
+         elif isinstance(arg, bool):
+             return make_constant(OVType.boolean, Shape([]), [arg])
+         elif isinstance(arg, int):
+             return make_constant(OVType.i64, Shape([]), [arg])
+         elif isinstance(arg, float):
+             return make_constant(OVType.f32, Shape([]), [arg])
+         elif isinstance(arg, str):
+             u8_tensor = torch.frombuffer(str.encode(arg), dtype=torch.uint8)
+             return torch_tensor_to_ov_const(u8_tensor, shared_memory=True)
+         return None
+
+     def inlined_input(self, index):
+         assert index < len(self._inputs), "Requested input doesn't exist"
+         assert isinstance(
+             self._inputs[index], InlinedInput), "Requested input which is not inlined"
+         arg = self._inputs[index].data
+         assert arg is not None, f"Requested None inlined input for op {self.get_op_type()}"
+         constant = None
+         constant = self.arg_to_constant(arg)
+
+         if constant is not None:
+             return constant.outputs()
+         else:
+             return []
+
+     def input(self, index):  # TODO: remove
+         return self.inputs()[index]  # TODO: find specialized method
+
+     def get_input_debug_name(self, index):
+         return "input"+str(index)
+
+     def get_input_signature_name(self, index: int) -> str:
+         if self._input_signature is not None and index < len(self._input_signature):
+             return self._input_signature[index]
+         return self.get_input_debug_name(index)
+
+     def get_input_shape(self, index):
+         if index < len(self.input_shapes) and self.input_shapes[index] is not None:
+             return PartialShape(self.input_shapes[index])
+         input = self._raw_input(index)
+         return self.get_shape_for_value(input)
+
+     def get_input_strides(self, index: int) -> list:
+         raw_input = self._raw_input(index)
+         if isinstance(raw_input, torch.fx.node.Node) and hasattr(raw_input, "meta"):
+             meta = raw_input.meta
+             if "tensor_meta" in meta and hasattr(meta["tensor_meta"], "stride"):
+                 strides = list(meta["tensor_meta"].stride)
+                 if strides:
+                     return strides
+         return []
+
+     def get_input_type(self, index):
+         if index < len(self.input_types) and self.input_types[index] is not None:
+             return self.input_types[index]
+         input = self._raw_input(index)
+         return self.get_type_for_value(input)
+
+     def get_output_debug_name(self, index):
+         if self._outputs is not None and index < len(self._outputs) and self._outputs[index][0]:
+             return self._outputs[index][0]
+         name = getattr(self.pt_module, "name", "output")
+         return name + ":" + str(index)
+
+     def get_output_shape(self, index):
+         output = self._raw_output(index)
+         return self.get_shape_for_value(output)
+
+     def get_output_type(self, index):
+         output = self._raw_output(index)
+         return self.get_type_for_value(output)
+
+     def get_shape_for_value(self, value):
+         if value and hasattr(value, "meta") and ('tensor_meta' in value.meta.keys()):
+             if value.meta['tensor_meta']:
+                 return PartialShape(len(value.meta['tensor_meta'].shape) * [-1])
+         return PartialShape.dynamic()
+
+     @staticmethod
+     def get_type_for_value(value):
+         if issubclass(type(value), torch.fx.Node):
+             if ('tensor_meta' in value.meta.keys()):
+                 if value.meta['tensor_meta'] and isinstance(value.meta['tensor_meta'], torch.Tensor):
+                     pt_type = value.meta['tensor_meta'].dtype
+                     if str(pt_type) in pt_to_ov_type_map:
+                         ov_type = pt_to_ov_type_map[str(pt_type)]
+                         return OVAny(ov_type)
+             return OVAny(OVType.dynamic)
+         elif isinstance(value, int):
+             return OVAny(DecoderType.PyScalar(OVAny(OVType.i64)))
+         elif isinstance(value, float):
+             return OVAny(DecoderType.PyScalar(OVAny(OVType.f32)))
+         elif isinstance(value, bool):
+             return OVAny(DecoderType.PyScalar(OVAny(OVType.boolean)))
+         return OVAny(OVType.dynamic)
+
+     def get_attribute(self, name):
+         if name in self.pt_module.kwargs:
+             attr = self.pt_module.kwargs[name]
+             if isinstance(attr, torch.dtype):
+                 return OVAny(pt_to_ov_type_map[str(attr)])
+             if isinstance(attr, torch.device):
+                 return OVAny(attr.type)
+             if isinstance(attr, str):
+                 return OVAny(attr)
+             # Numeric attrs convert to Constant
+             constant = self.arg_to_constant(attr)
+             if constant is not None:
+                 return OVAny(constant.output(0))
+             # so that has_attribute return True if attribute exist
+             return OVAny(DecoderType.PyNone())
+         return OVAny(None)
+
+     def get_named_input(self, name):
+         """
+         Returns id of kwargs input. Such input can be Node or a constant value,
+         this function is only used for to return node index. If the input is
+         constant, get_attribute should be used.
+         """
+         if name in self.pt_module.kwargs:
+             arg = self.pt_module.kwargs[name]
+             if isinstance(arg, torch.fx.Node):
+                 return self._nodes.index(arg)
+         raise RuntimeError("This input is not a Node")
+
+     def get_subgraph_size(self):
+         return len(self.get_subgraphs())
+
+     def decoder_type_name(self) -> str:
+         return "fx"
+
+     def visit_subgraph(self, node_visitor):
+         # make sure topological order is satisfied
+         for node in self._nodes:
+             if node.op == 'placeholder' or node.op == 'output':
+                 continue  # skipping non-operational nodes
+             if node.op == 'call_function' and str(node.target) in ["aten._assert_async.msg"]:
+                 continue
+             decoder = TorchFXPythonDecoder(
+                 node, self.fx_gm, self._nodes, mark_node_callback=self.mark_node_callback)
+             self.m_decoders.append(decoder)
+             node_visitor(decoder)
+
+     def get_subgraphs(self):
+         return []
+
+     def get_subgraph_decoder(self, index):
+         decoder = TorchFXPythonDecoder(self.get_subgraphs()[index],
+                                        self.fx_gm,
+                                        mark_node_callback=self.mark_node_callback)
+         self.m_decoders.append(decoder)
+         return decoder
+
+     def get_op_type(self):
+         if self.pt_module.op == 'call_function':
+             return str(self.pt_module.target)
+         elif self.pt_module.op == 'get_attr':
+             return 'get_attr'  # FIXME should be aligned with get_attr from TS implementation
+         else:
+             return 'UNKNOWN_TYPE_' + str(self.pt_module.op)
+
+     def get_schema(self):
+         return 'NONE'
+
+     def outputs(self):
+         return [o[1] for o in self._outputs]
+
+     def _raw_outputs(self):
+         return [self._nodes[x[1]] for x in self._outputs]
+
+     def _raw_output(self, index):
+         return self._raw_outputs()[index]
+
+     def _raw_inputs(self):
+         return [self._nodes[x] if not isinstance(x, InlinedInput) and x < len(self._nodes) else x.data for x in self._inputs]
+
+     def _raw_input(self, index):
+         return self._raw_inputs()[index]
+
+     def num_of_outputs(self):
+         return len(self.outputs())
+
+     def output_list_size(self):
+         max_out_id = -1
+         for user in self.pt_module.users:
+             if "<built-in function getitem>" == str(user.target) and max_out_id < user.args[1]:
+                 max_out_id = user.args[1]
+         return max_out_id + 1
+
+     def output(self, index):
+         return self.outputs()[index]
+
+     def mark_node(self, node):
+         name = self.get_op_type()
+         if "FrameworkNode" not in node.get_type_name():
+             name += "/" + node.get_type_name()
+         node.set_friendly_name(self.pt_module.name + "/" + name)
+         if self.mark_node_callback is not None:
+             self.mark_node_callback(self, node)
+         return node
+
+     def as_constant(self):
+         assert self.pt_module.op == 'get_attr', "Only get_attr is supported"
+         # Extract Constant from FX module field
+         ret = fetch_attr(self.fx_gm, self.pt_module.target)
+         ov_const = torch_tensor_to_ov_const(ret, shared_memory=True)
+         return ov_const.outputs()
+
+     def as_string(self):
+         return None
+
+     def input_is_none(self, index):
+         if index >= len(self._inputs) or (isinstance(self._inputs[index], InlinedInput) and self._inputs[index].data is None):
+             return True
+         else:
+             r_input = self._raw_input(index)
+             return str(type(r_input)) in ['torch.NoneType', 'NoneType']
+
+     def debug(self):
+         self.pt_module.print()
+
+     def may_produce_alias(self, in_index: int, out_index: int) -> bool:
+         return False
+
+     def get_rt_info(self):
+         rt_info = {}
+         return rt_info
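
For orientation, here is a hedged sketch of how TorchFXPythonDecoder is typically instantiated over a torch.fx graph (not part of the diff; it assumes torch and the bundled OpenVINO PyTorch frontend binaries are importable from this wheel's environment).

import torch
from openvino.frontend.pytorch.fx_decoder import TorchFXPythonDecoder

class Add(torch.nn.Module):
    def forward(self, x, y):
        return x + y

# symbolic_trace yields a GraphModule whose placeholder/output nodes the decoder
# walks to collect input and output node indices.
gm = torch.fx.symbolic_trace(Add())
decoder = TorchFXPythonDecoder(gm)
print(decoder.inputs(), decoder.outputs())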