bigdl-core-npu 2.5.0 (cp311-cp311-win_amd64.whl) → 2.6.0 (cp311-cp311-win_amd64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bigdl-core-npu/__init__.py +0 -0
- bigdl-core-npu/common.lib +0 -0
- bigdl-core-npu/ggml.dll +0 -0
- bigdl-core-npu/ggml.lib +0 -0
- bigdl-core-npu/include/llamacpp/arg.h +77 -0
- bigdl-core-npu/include/llamacpp/common.h +563 -0
- bigdl-core-npu/include/llamacpp/ggml-alloc.h +76 -0
- bigdl-core-npu/include/llamacpp/ggml-backend.h +241 -0
- bigdl-core-npu/include/llamacpp/ggml.h +2679 -0
- bigdl-core-npu/include/llamacpp/llama.h +1234 -0
- bigdl-core-npu/include/llamacpp/log.h +92 -0
- bigdl-core-npu/include/npu/npu_common.h +119 -0
- bigdl-core-npu/include/npu/npu_llm.h +77 -0
- bigdl-core-npu/llama-cli-npu.exe +0 -0
- bigdl-core-npu/llama.dll +0 -0
- bigdl-core-npu/llama.lib +0 -0
- bigdl-core-npu/llm-cli.exe +0 -0
- bigdl-core-npu/npu_llm.dll +0 -0
- bigdl-core-npu/npu_llm.lib +0 -0
- bigdl-core-npu/zlib1.dll +0 -0
- bigdl_core_npu-2.6.0.data/scripts/init-llama-cpp.bat +29 -0
- {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/METADATA +12 -3
- {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/RECORD +146 -96
- {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/WHEEL +1 -1
- {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/top_level.txt +1 -0
- intel_npu_acceleration_library/_version.py +1 -1
- intel_npu_acceleration_library/backend/base.py +39 -4
- intel_npu_acceleration_library/backend/bindings.py +109 -5
- intel_npu_acceleration_library/backend/factory.py +264 -47
- intel_npu_acceleration_library/backend/ops.py +2 -1
- intel_npu_acceleration_library/backend/qlinear.py +8 -4
- intel_npu_acceleration_library/backend/runtime.py +7 -2
- intel_npu_acceleration_library/backend/tensor.py +73 -3
- intel_npu_acceleration_library/bigdl-core-npu/cache.json +113732 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_auto_batch_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_auto_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_c.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_hetero_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_intel_cpu_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_intel_gpu_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_intel_npu_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_ir_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_onnx_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_paddle_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_pytorch_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_tensorflow_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_tensorflow_lite_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbb12.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbb12_debug.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbbind_2_5.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbbind_2_5_debug.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc_debug.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc_proxy.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc_proxy_debug.dll +0 -0
- intel_npu_acceleration_library/device.py +2 -2
- intel_npu_acceleration_library/dtypes.py +34 -1
- intel_npu_acceleration_library/external/openvino/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/experimental/__init__.py +14 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +293 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/passes.py +65 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +182 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +37 -19
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +47 -6
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +28 -8
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +17 -5
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +1 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +55 -47
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +95 -63
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +12 -10
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +31 -10
- intel_npu_acceleration_library/external/openvino/helpers/packing.py +4 -4
- intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +2 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +1 -0
- intel_npu_acceleration_library/external/openvino/properties/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +1 -1
- intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +2 -1
- intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +5 -6
- intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +7 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +193 -2
- intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +69 -43
- intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +4 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +2 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +21 -3
- intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +88 -2
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +9 -9
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +16 -2
- intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +5 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +68 -16
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +69 -60
- intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +90 -3
- intel_npu_acceleration_library/external/openvino/utils.py +17 -0
- intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
- intel_npu_acceleration_library/nn/module.py +17 -17
intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py

@@ -25,6 +25,13 @@ logger = logging.getLogger(__name__)
 logger.setLevel(logging.WARNING)
 
 
+class PatternNode:
+    op_types = {}
+
+    def __init__(self):
+        self.op_types = {}
+
+
 class Partitioner:
     def __init__(self, options):
         self.supported_ops = OperatorSupport(options)
@@ -56,55 +63,56 @@
             return True
         return False
 
-    def capture_gptq_patterns(self, graph_module: GraphModule):
+    def check_pattern(self, node: torch.fx.Node, pattern: PatternNode, enabled_ops: list) -> bool:
+        if node.op == "call_function":
+            if ("call_function" + ":" + str(node.target)) in pattern.op_types:
+                pt_input_nodes = node.all_input_nodes
+                pattern_input_ops = pattern.op_types["call_function" + ":" + str(node.target)]
+                if pattern_input_ops is None:
+                    enabled_ops.append(node)
+                    return True
+                if len(pt_input_nodes) != len(pattern_input_ops):
+                    return False
+                for i in range(len(pt_input_nodes)):
+                    if not self.check_pattern(pt_input_nodes[i], pattern_input_ops[i], enabled_ops):
+                        return False
+                enabled_ops.append(node)
+                return True
+        elif node.op == "get_attr":
+            if "get_attr" in pattern.op_types:
+                return True
+            else:
+                return False
+        return False
+
+    def capture_gptq_patterns(self, graph_module: GraphModule):
+        const_0_node = PatternNode
+        const_0_node.op_types["get_attr"] = None
+        unsqueeze_0_node = PatternNode
+        unsqueeze_0_node.op_types["call_function:aten.unsqueeze.default"] = [const_0_node,]
+        expand_node = PatternNode
+        expand_node.op_types["call_function:aten.expand.default"] = [unsqueeze_0_node,]
+        const_1_node = PatternNode
+        const_1_node.op_types["get_attr"] = None
+        unsqueeze_1_node = PatternNode
+        unsqueeze_1_node.op_types["call_function:aten.unsqueeze.default"] = [const_1_node,]
+        bitwise_right_shift_node = PatternNode
+        bitwise_right_shift_node.op_types["call_function:aten.bitwise_right_shift.Tensor"] = [expand_node, unsqueeze_1_node]
+        to_copy_node = PatternNode
+        to_copy_node.op_types["call_function:aten._to_copy.default"] = [bitwise_right_shift_node,]
+        add_or_to_copy_node = PatternNode
+        add_or_to_copy_node.op_types["call_function:aten._to_copy.default"] = [bitwise_right_shift_node,]
+        add_or_to_copy_node.op_types["call_function:aten.add.Tensor"] = [to_copy_node,]
+        bitwise_and_node = PatternNode
+        bitwise_and_node.op_types["call_function:aten.bitwise_and.Scalar"] = [add_or_to_copy_node,]
+
         for node in graph_module.graph.nodes:
             if str(node.op) == "call_function" and str(node.target) == "aten.bitwise_and.Scalar":
-                bitwise_and_in_nodes = node.all_input_nodes
-                if len(bitwise_and_in_nodes) != 1:
-                    continue
-                to_copy_node = bitwise_and_in_nodes[0]
-                if str(to_copy_node.op) != "call_function" or str(to_copy_node.target) != "aten._to_copy.default":
-                    continue
-                to_copy_in_nodes = to_copy_node.all_input_nodes
-                if len(to_copy_in_nodes) != 1:
-                    continue
-                bitwise_right_shift_node = to_copy_in_nodes[0]
-                if str(bitwise_right_shift_node.op) != "call_function" or str(bitwise_right_shift_node.target) != "aten.bitwise_right_shift.Tensor":
-                    continue
-                bitwise_right_shift_in_nodes = bitwise_right_shift_node.all_input_nodes
-                if len(bitwise_right_shift_in_nodes) != 2:
-                    continue
-                expand_node = bitwise_right_shift_in_nodes[0]
-                if str(expand_node.op) != "call_function" or str(expand_node.target) != "aten.expand.default":
-                    continue
-                expand_in_nodes = expand_node.all_input_nodes
-                if len(expand_in_nodes) != 1:
-                    continue
-                unsqueeze_0_node = expand_in_nodes[0]
-                if str(unsqueeze_0_node.op) != "call_function" or str(unsqueeze_0_node.target) != "aten.unsqueeze.default":
-                    continue
-                unsqueeze_0_in_nodes = unsqueeze_0_node.all_input_nodes
-                if len(unsqueeze_0_in_nodes) != 1:
-                    continue
-                const_0_node = unsqueeze_0_in_nodes[0]
-                if str(const_0_node.op) != "get_attr":
-                    continue
-                unsqueeze_1_node = bitwise_right_shift_in_nodes[1]
-                if str(unsqueeze_1_node.op) != "call_function" or str(unsqueeze_1_node.target) != "aten.unsqueeze.default":
-                    continue
-                unsqueeze_1_in_nodes = unsqueeze_1_node.all_input_nodes
-                if len(unsqueeze_1_in_nodes) != 1:
-                    continue
-                const_1_node = unsqueeze_1_in_nodes[0]
-                if str(const_1_node.op) != "get_attr":
-                    continue
-
-                self.supported_ops.enable_by_name(node)
-                self.supported_ops.enable_by_name(to_copy_node)
-                self.supported_ops.enable_by_name(bitwise_right_shift_node)
-                self.supported_ops.enable_by_name(expand_node)
-                self.supported_ops.enable_by_name(unsqueeze_0_node)
-                self.supported_ops.enable_by_name(unsqueeze_1_node)
+                enabled_ops = []
+                pattern_match = self.check_pattern(node, bitwise_and_node, enabled_ops)
+                if pattern_match:
+                    for pattern_op in enabled_ops:
+                        self.supported_ops.enable_by_name(pattern_op)
 
     def make_partitions(self, graph_module: GraphModule, options) -> GraphModule:
         allow_single_node_partition = _is_testing(options)
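
Note: the partition.py hunk above replaces a hand-rolled chain of input-node checks with a small declarative pattern tree that check_pattern walks recursively, bottom-up from the aten.bitwise_and.Scalar root, collecting matched nodes into enabled_ops. A minimal self-contained sketch of the same matching scheme, with a toy FxNode standing in for torch.fx.Node (FxNode and Pattern are illustrative names, not part of the package):

# Toy stand-ins: FxNode mimics the torch.fx.Node fields the matcher reads,
# Pattern mimics PatternNode's op_types mapping.
class FxNode:
    def __init__(self, op, target, inputs=()):
        self.op = op
        self.target = target
        self.all_input_nodes = list(inputs)

class Pattern:
    def __init__(self, op_types):
        # key: "call_function:<target>" or "get_attr";
        # value: list of child patterns, or None to accept any inputs
        self.op_types = op_types

def check_pattern(node, pattern, enabled):
    if node.op == "call_function":
        key = "call_function:" + str(node.target)
        if key in pattern.op_types:
            children = pattern.op_types[key]
            if children is None:            # wildcard leaf: accept as-is
                enabled.append(node)
                return True
            inputs = node.all_input_nodes
            if len(inputs) != len(children):
                return False
            for child_node, child_pattern in zip(inputs, children):
                if not check_pattern(child_node, child_pattern, enabled):
                    return False
            enabled.append(node)            # appended only after all producers matched
            return True
    elif node.op == "get_attr":
        return "get_attr" in pattern.op_types
    return False

# get_attr -> aten.unsqueeze.default, matched from the consumer downward:
const = FxNode("get_attr", "qweight")
unsq = FxNode("call_function", "aten.unsqueeze.default", [const])
pattern = Pattern({"call_function:aten.unsqueeze.default": [Pattern({"get_attr": None})]})
matched = []
assert check_pattern(unsq, pattern, matched) and matched == [unsq]

Because a node is appended only once its whole producer subtree has matched, a successful match at the root hands the caller the complete GPTQ dequantization subgraph to enable in one pass.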
intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py

@@ -7,7 +7,14 @@
 from openvino.frontend.pytorch.py_pytorch_frontend import _FrontEndPytorchDecoder as Decoder
 from openvino.frontend.pytorch.py_pytorch_frontend import _Type as DecoderType
 from openvino.runtime import op, PartialShape, Type as OVType, OVAny
-from openvino.frontend.pytorch.utils import ivalue_to_constant, get_value_from_getattr, pt_to_ov_type_map, prepare_example_inputs_and_model, convert_quantized_tensor, graph_has_ops
+from openvino.frontend.pytorch.utils import (
+    ivalue_to_constant,
+    get_value_from_getattr,
+    pt_to_ov_type_map,
+    prepare_example_inputs_and_model,
+    convert_quantized_tensor,
+    graph_has_ops,
+)
 from openvino.runtime import opset11 as ops
 from openvino.frontend.pytorch import gptq
 from openvino.frontend.pytorch import patch_model
@@ -15,20 +22,22 @@ from openvino.frontend.pytorch.module_extension import ModuleExtension
 
 import typing
 import torch
+import inspect
 
 
-class TorchScriptPythonDecoder (Decoder):
+class TorchScriptPythonDecoder(Decoder):
     def __init__(
-            self,
-            pt_module,
-            graph_element=None,
-            example_input=None,
-            alias_db=None,
-            shared_memory=True,
-            skip_freeze=False,
-            constant_cache=None,
-            module_extensions=None):
-        Decoder.__init__(self)
+        self,
+        pt_module,
+        graph_element=None,
+        example_input=None,
+        alias_db=None,
+        shared_memory=True,
+        skip_freeze=False,
+        constant_cache=None,
+        module_extensions=None,
+    ):
+        super().__init__()
         # We store every decoder created by this decoder so that all them are not deleted until the first decoder is deleted
         self.m_decoders = []
         self._input_signature = None
@@ -36,7 +45,12 @@ class TorchScriptPythonDecoder (Decoder):
         self._input_is_list = False
         self.constant_cache = constant_cache if constant_cache is not None else dict()
         self.module_extensions = module_extensions
+        self.config = None
+        self.out_debug_name_overwrites = {}
         if graph_element is None:
+            if hasattr(pt_module, "config"):
+                self.config = pt_module.config.to_dict() if not isinstance(
+                    pt_module.config, dict) else pt_module.config
             try:
                 pt_module = self._get_scripted_model(
                     pt_module, example_input, skip_freeze)
@@ -53,7 +67,8 @@ class TorchScriptPythonDecoder (Decoder):
                     f"Couldn't get TorchScript module by {msg}. With exception:\n{e}\n{help_msg} "
                     "You can also provide TorchScript module that you obtained"
                     " yourself, please refer to PyTorch documentation: "
-                    "https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html.")
+                    "https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html."
+                )
             self.graph_element = pt_module.inlined_graph
             self.alias_db = self.graph_element.alias_db()
         else:
@@ -77,26 +92,27 @@ class TorchScriptPythonDecoder (Decoder):
             self._transform_tensor_list_constants_to_listconstruct(
                 self.graph_element)
             self._transform_optional_constants(self.graph_element)
-        self.out_debug_name_overwrites = {}
 
     @staticmethod
     def _get_preserved_attributes(model) -> list:
        preserved_attributes = []
         for name, module in model.named_modules():
-            if hasattr(module, "weight") and module.weight is not None:
-                if module.weight.dtype in [torch.int8, torch.uint8, torch.float16, torch.bfloat16]:
-                    preserved_attributes.append(name)
+            compressed_types = [torch.int8, torch.uint8,
+                                torch.float16, torch.bfloat16]
+            if hasattr(module, "weight") and getattr(module.weight, "dtype", None) in compressed_types:
+                preserved_attributes.append(name)
         return preserved_attributes
 
     def _get_scripted_model(self, pt_module, example_inputs=None, skip_freeze=False):
-        import torch
-        import inspect
-
         freeze_by_default = False
         if isinstance(pt_module, torch.nn.Module):
             pt_module.eval()
         input_signature = None
-        if isinstance(pt_module, torch.nn.Module) and not isinstance(pt_module, (torch.jit._trace.TopLevelTracedModule, torch.jit._script.RecursiveScriptModule)):
+        input_parameters = None
+        if isinstance(pt_module, torch.nn.Module) and not isinstance(
+            pt_module, (torch.jit._trace.TopLevelTracedModule,
+                        torch.jit._script.RecursiveScriptModule)
+        ):
             # input params is dictionary contains input names and their signature values (type hints and default values if any)
             input_params = inspect.signature(pt_module.forward if hasattr(
                 pt_module, "forward") else pt_module.__call__).parameters
@@ -104,7 +120,8 @@ class TorchScriptPythonDecoder (Decoder):
 
         if example_inputs is None:
             if self.module_extensions:
-                raise RuntimeError("ModuleExtension is not supported for scripting. Please provide valid example_input argument to run tracing.")
+                raise RuntimeError(
+                    "ModuleExtension is not supported for scripting. Please provide valid example_input argument to run tracing.")
             scripted = torch.jit.script(pt_module)
             freeze_by_default = True
         else:
@@ -112,9 +129,10 @@ class TorchScriptPythonDecoder (Decoder):
                 example_inputs, input_params, pt_module)
 
             # name of attribute in a patched module where the original forward method is kept
-            orig_forward_name = '_openvino_module_extension_patch_orig_forward'
+            orig_forward_name = "_openvino_module_extension_patch_orig_forward"
             if self.module_extensions:
-                patch_model.patch_model(pt_module, self.module_extensions, orig_forward_name)
+                patch_model.patch_model(
+                    pt_module, self.module_extensions, orig_forward_name)
 
             gptq_patched = False
             if gptq.detect_gptq_model(pt_module):
@@ -123,9 +141,9 @@ class TorchScriptPythonDecoder (Decoder):
                     gptq_patched = True
                 except Exception as error:
                     print(
-                          "[ WARNING ] Failed patching of AutoGPTQ model. Error message:\n", error)
+                        "[ WARNING ] Failed patching of AutoGPTQ model. Error message:\n", error)
                     print(
-                          "[ WARNING ] Tracing of the model will likely be unsuccessful or incorrect")
+                        "[ WARNING ] Tracing of the model will likely be unsuccessful or incorrect")
                     gptq.unpatch_model(pt_module)
                     gptq_patched = False
 
@@ -138,10 +156,13 @@ class TorchScriptPythonDecoder (Decoder):
             if self.module_extensions:
                 patch_model.unpatch_model(pt_module, orig_forward_name)
 
-            if not freeze_by_default and graph_has_ops(scripted.inlined_graph, ["prim::Uninitialized", "prim::unchecked_cast", "aten::append"]):
+            have_to_freeze_ops = ["prim::Uninitialized",
+                                  "prim::unchecked_cast", "aten::append"]
+            if not freeze_by_default and graph_has_ops(scripted.inlined_graph, have_to_freeze_ops):
                 # freeze models with unsupported ops
                 freeze_by_default = True
-            if freeze_by_default and graph_has_ops(scripted.inlined_graph, ["quantized", "aten::as_strided"]):
+            quantized_hint_ops = ["quantized", "aten::as_strided"]
+            if freeze_by_default and graph_has_ops(scripted.inlined_graph, quantized_hint_ops):
                 # do not freeze quantized models and can't freeze for aten::as_strided it will result in incorrect inference
                 freeze_by_default = False
             if freeze_by_default and not skip_freeze:
@@ -150,8 +171,10 @@ class TorchScriptPythonDecoder (Decoder):
                     scripted, preserved_attrs=preserved_attrs)
             else:
                 f_model = scripted
+            self._example_input = input_parameters["example_inputs"] if input_parameters else None
         else:
             f_model = pt_module
+            self._example_input = example_inputs
 
         self._input_signature = input_signature
         return f_model
@@ -250,12 +273,14 @@ class TorchScriptPythonDecoder (Decoder):
     def visit_subgraph(self, node_visitor) -> None:
         # make sure topological order is satisfied
         for node in self.graph_element.nodes():
-            decoder = TorchScriptPythonDecoder(self.pt_module,
-                                               node,
-                                               alias_db=self.alias_db,
-                                               shared_memory=self._shared_memory,
-                                               constant_cache=self.constant_cache,
-                                               module_extensions=self.module_extensions)
+            decoder = TorchScriptPythonDecoder(
+                self.pt_module,
+                node,
+                alias_db=self.alias_db,
+                shared_memory=self._shared_memory,
+                constant_cache=self.constant_cache,
+                module_extensions=self.module_extensions,
+            )
             self.m_decoders.append(decoder)
             node_visitor(decoder)
 
@@ -275,31 +300,32 @@ class TorchScriptPythonDecoder (Decoder):
         return list(self.graph_element.blocks())
 
     def get_subgraph_decoder(self, index: int):
-        decoder = TorchScriptPythonDecoder(self.pt_module,
-                                           self.get_subgraphs()[index],
-                                           alias_db=self.alias_db,
-                                           shared_memory=self._shared_memory,
-                                           module_extensions=self.module_extensions)
+        decoder = TorchScriptPythonDecoder(
+            self.pt_module, self.get_subgraphs(
+            )[index], alias_db=self.alias_db, shared_memory=self._shared_memory, module_extensions=self.module_extensions
+        )
         self.m_decoders.append(decoder)
         return decoder
 
     def get_op_type(self) -> str:
         assert isinstance(
             self.graph_element, torch.Node), "Function can be called only when self.graph_element is of type torch.Node"
-        if self.graph_element.kind() == "prim::PythonOp":
-            if hasattr(self.graph_element, "pyobj") and callable(self.graph_element.pyobj) and hasattr(self.graph_element.pyobj(), "__self__"):
-                trampoline = self.graph_element.pyobj().__self__
-                if hasattr(trampoline, "target_extension") and isinstance(trampoline.target_extension, ModuleExtension):
-                    target_op = trampoline.target_extension.target_op
-                    if callable(target_op):
-                        target = target_op(trampoline.original_module)
-                    elif isinstance(target_op, str):
-                        target = target_op
-                    # TODO: Support target as a callable that will play a role of ConversionExtension for an entire module instead of a single op.
-                    # Without supporting target as a callable here, ConversionExtension functionality is still possible to implement
-                    # by combining two extensions: ModuleExtension that use temporary name as a target op and another extension of type ConversionExtension
-                    # that translates that particular temporary name to custom graph. But providing conversion code as a callable `target` is more convenient.
-                    return target
+        if self.graph_element.kind() == "prim::PythonOp" and callable(getattr(self.graph_element, "pyobj", None)):
+            pyobj = self.graph_element.pyobj()
+            trampoline = getattr(pyobj, "__self__", None)
+            target_extension = getattr(trampoline, "target_extension", None)
+
+            if isinstance(target_extension, ModuleExtension):
+                target_op = target_extension.target_op
+                if callable(target_op):
+                    target = target_op(trampoline.original_module)
+                elif isinstance(target_op, str):
+                    target = target_op
+                # TODO: Support target as a callable that will play a role of ConversionExtension for an entire module instead of a single op.
+                # Without supporting target as a callable here, ConversionExtension functionality is still possible to implement
+                # by combining two extensions: ModuleExtension that use temporary name as a target op and another extension of type ConversionExtension
+                # that translates that particular temporary name to custom graph. But providing conversion code as a callable `target` is more convenient.
+                return target
         return self.graph_element.kind()
 
     def get_schema(self) -> str:
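
Note: in the get_op_type hunk above, a prim::PythonOp produced by a patched module now reports its type via the owning ModuleExtension's target_op, which may be either a fixed string or a callable receiving the original module. A toy illustration of that dispatch (Ext and resolve_target are stand-ins for sketching, not OpenVINO classes):

class Ext:
    def __init__(self, target_op):
        self.target_op = target_op

def resolve_target(ext, original_module):
    # Mirrors the diff: a callable target_op is invoked with the original
    # module; a string target_op is used verbatim as the op type.
    if callable(ext.target_op):
        return ext.target_op(original_module)
    if isinstance(ext.target_op, str):
        return ext.target_op

assert resolve_target(Ext("MyCustomOp"), None) == "MyCustomOp"
assert resolve_target(Ext(lambda m: "Op_" + type(m).__name__), object()) == "Op_object"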
@@ -339,8 +365,8 @@ class TorchScriptPythonDecoder (Decoder):
             self.constant_cache[name] = outputs
 
     def try_decode_get_attr(self):
-        pt_value, name = get_value_from_getattr(self.graph_element,
-                                                self.pt_module)
+        pt_value, name = get_value_from_getattr(
+            self.graph_element, self.pt_module)
         assert pt_value is not None, "Couldn't retrieve value from prim::GetAttr"
         if isinstance(pt_value, torch.ScriptObject):
             # We assume this is __torch__.torch.classes.quantized.Conv2dPackedParamsBase or __torch__.torch.classes.quantized.LinearPackedParamsBase
@@ -386,8 +412,8 @@ class TorchScriptPythonDecoder (Decoder):
             if name in self.constant_cache:
                 const = self.constant_cache[name]
             else:
-                const = ivalue_to_constant(pt_value,
-                                           shared_memory=self._shared_memory)
+                const = ivalue_to_constant(
+                    pt_value, shared_memory=self._shared_memory)
             self._add_name_to_const_and_cache(const, name)
             return const
         else:
@@ -401,12 +427,11 @@ class TorchScriptPythonDecoder (Decoder):
             pt_value = self._raw_output(0)
             pt_type = pt_value.type()
             if isinstance(pt_type, torch.TensorType):
-                return ivalue_to_constant(pt_value.toIValue(),
-                                          shared_memory=self._shared_memory)
+                return ivalue_to_constant(pt_value.toIValue(), shared_memory=self._shared_memory)
             if isinstance(pt_type, torch.ListType):
                 return self._as_constant_list(pt_value)
-            const = ivalue_to_constant(pt_value.toIValue(),
-                                       shared_memory=self._shared_memory)
+            const = ivalue_to_constant(
+                pt_value.toIValue(), shared_memory=self._shared_memory)
             if len(const) > 0:
                 # set name corresponding to state_dict name
                 const[0].get_node().set_friendly_name(
@@ -460,8 +485,8 @@ class TorchScriptPythonDecoder (Decoder):
         else:
             in_node = r_input.node()
             if in_node.kind() == "prim::GetAttr":
-                pt_value, _ = get_value_from_getattr(in_node,
-                                                     self.pt_module)
+                pt_value, _ = get_value_from_getattr(
+                    in_node, self.pt_module)
                 return pt_value is None
         return False
 
@@ -487,6 +512,13 @@ class TorchScriptPythonDecoder (Decoder):
     def get_named_input(self, name):
         raise RuntimeError("There is no named inputs in TS graph")
 
+    def get_rt_info(self):
+        rt_info = {}
+        if self.config is not None and "quantization_config" in self.config and "sym" in self.config["quantization_config"]:
+            rt_info["symmetric_quantization"] = OVAny(
+                self.config["quantization_config"]["sym"])
+        return rt_info
+
     @staticmethod
     def _transform_tensor_list_constants_to_listconstruct(graph: torch.Graph):
         # Function replaces prim::Constant containing List of Tensors with
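
Note: the get_rt_info addition above forwards one Hugging Face quantization flag into the converted model's runtime info: when the wrapped module carries a config whose quantization_config has a "sym" entry, it is exported as a symmetric_quantization value. A minimal sketch of that lookup on plain dicts (the real decoder wraps the value in OVAny; the config layout mimicking transformers' PretrainedConfig.to_dict() is an assumption here):

def extract_rt_info(config):
    rt_info = {}
    if config is not None and "sym" in config.get("quantization_config", {}):
        rt_info["symmetric_quantization"] = config["quantization_config"]["sym"]
    return rt_info

assert extract_rt_info({"quantization_config": {"sym": True, "bits": 4}}) == {"symmetric_quantization": True}
assert extract_rt_info({"model_type": "llama"}) == {}
assert extract_rt_info(None) == {}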
intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py

@@ -25,7 +25,7 @@ def fetch_attr(self_module, target: str):
     Return:
         Any: The value of the attribute.
     """
-    target_atoms = target.split('.')
+    target_atoms = target.split(".")
     attr_itr = self_module
     for i, atom in enumerate(target_atoms):
         if not hasattr(attr_itr, atom):
@@ -91,12 +91,13 @@ def get_value_from_getattr(getattr_node, self_module):
         node = stack.pop()
         attr_name = node.s("name")
         assert hasattr(
-            module, attr_name), f"No attribute with name {attr_name} found in module."
+            module, attr_name), f'No attribute with name "{attr_name}" found in module.'
         path_name = ".".join([path_name, attr_name])
         module = getattr(module, attr_name)
     return module, path_name
 
-def graph_has_ops(graph, op_types:list) -> bool:
+
+def graph_has_ops(graph, op_types: list) -> bool:
     res = False
     for n in graph.nodes():
         if any(kind in n.kind() for kind in op_types):
@@ -106,7 +107,7 @@ def graph_has_ops(graph, op_types:list) -> bool:
         if res:
             return res
     return res
-
+
 
 pt_to_ov_type_map = {
     "float": OVType.f32,
@@ -134,7 +135,7 @@ pt_to_ov_type_map = {
     "torch.BoolTensor": OVType.boolean,
     "torch.quint8": OVType.u8,
     "torch.qint8": OVType.i8,
-    "torch.qint32": OVType.i32
+    "torch.qint32": OVType.i32,
 }
 
 
@@ -159,7 +160,7 @@ def process_dict_inputs(inputs, input_params, model):
             ordered_inputs.append(input_name)
 
     input_signature = list(input_params)
-    if ordered_inputs == input_signature[:len(ordered_inputs)]:
+    if ordered_inputs == input_signature[: len(ordered_inputs)]:
         example_inputs = [inputs[input_name] for input_name in ordered_inputs]
         if all([isinstance(inp, torch.Tensor) for inp in example_inputs]):
             return {"example_inputs": [inputs[name] for name in ordered_inputs]}, ordered_inputs, model
@@ -191,8 +192,8 @@ def process_dict_inputs(inputs, input_params, model):
             str(input_params[input_name]).replace("NoneType", "None"))
         input_params_str.append(f"{input_name}={input_name}")
 
-    wrapper_class = wrapper_template.format(input_sign=', '.join(
-        input_sign_str), example_input=', '.join(input_params_str))
+    wrapper_class = wrapper_template.format(input_sign=", ".join(
+        input_sign_str), example_input=", ".join(input_params_str))
     result = {}
     try:
         exec(wrapper_class, result)
@@ -210,7 +211,8 @@ def prepare_example_inputs_and_model(inputs, input_params, model):
     input_is_list = False
     input_signature = list(input_params)
     if isinstance(inputs, dict):
-        examples, ordered, wrapped = process_dict_inputs(inputs, input_params, model)
+        examples, ordered, wrapped = process_dict_inputs(
+            inputs, input_params, model)
         return examples, ordered, wrapped, input_is_list
     if isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], torch.Tensor):
         if "typing.List" in str(input_params[input_signature[0]].annotation):
@@ -219,7 +221,7 @@ def prepare_example_inputs_and_model(inputs, input_params, model):
 
     if isinstance(inputs, torch.Tensor):
         inputs = [inputs]
-    input_signature = input_signature[:len(inputs)]
+    input_signature = input_signature[: len(inputs)]
     return {"example_inputs": inputs}, input_signature, model, input_is_list
 
 
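
Note: graph_has_ops, leaned on by the ts_decoder freezing logic above, is a substring scan over node kinds: an op list like ["quantized", "aten::as_strided"] matches any kind containing one of those markers. A standalone sketch of that check, with a flat list of kind strings standing in for the TorchScript graph (an assumption for illustration; the real function iterates graph.nodes()):

def has_ops(kinds, op_types):
    # True if any node kind contains any of the requested markers.
    return any(marker in kind for kind in kinds for marker in op_types)

kinds = ["prim::GetAttr", "aten::linear", "quantized::linear_dynamic"]
assert has_ops(kinds, ["quantized", "aten::as_strided"])            # quantized model: skip freezing
assert not has_ops(kinds, ["prim::Uninitialized", "aten::append"])  # nothing forces freezing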
(5 binary files changed: the py_tensorflow_frontend cp38-cp312 .pyd modules listed above; contents not shown)
intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py

@@ -6,13 +6,12 @@
 
 
 import logging as log
+import numpy as np
 import sys
+from openvino.runtime import PartialShape, Dimension, Type
 from packaging.version import parse, Version
 from typing import List, Dict, Union
 
-import numpy as np
-from openvino.runtime import PartialShape, Dimension, Type
-
 
 # TODO: reuse this method in ovc and remove duplication
 def get_static_shape(shape: [PartialShape, list, tuple], dynamic_value=None):
@@ -106,13 +105,32 @@ def trace_tf_model_if_needed(input_model, placeholder_shapes, placeholder_data_types, example_input):
         return trace_tf_model(input_model, placeholder_shapes, placeholder_data_types, example_input)
 
 
-def get_input_spec_from_model(model):
+def partial_shape_to_list(partial_shape: PartialShape):
+    if partial_shape.rank.is_dynamic:
+        return None
+    res_list = []
+    for dim in partial_shape:
+        if dim.is_static:
+            res_list.append(dim.get_length())
+        else:
+            res_list.append(None)
+    return res_list
+
+
+def get_input_spec_from_model(model, input_shapes=None):
     import tensorflow as tf
     if hasattr(model, "_build_input_shape") and model._build_input_shape is not None:
         if isinstance(model._build_input_shape, list):
             input_spec = [[tf.TensorSpec(shape) for shape in model._build_input_shape]]
         else:
             input_spec = [tf.TensorSpec(model._build_input_shape)]
+    elif input_shapes and isinstance(input_shapes, list) and len(input_shapes) > 0:
+        input_spec = []
+        for input_shape in input_shapes:
+            if isinstance(input_shape, PartialShape):
+                input_spec.append(tf.TensorSpec(partial_shape_to_list(input_shape)))
+            else:
+                input_spec.append(tf.TensorSpec(None))
     else:
         input_spec = [tf.TensorSpec(None)]
     return input_spec
@@ -199,10 +217,13 @@ def create_generic_function_from_keras_model(keras_model):
     if tf_input_signature is not None:
         @tf.function(input_signature=tf_input_signature)
         def wrapper_function_dict(*args):
-            input_dict = {}
-            for ind, tensor_spec in enumerate(tf_input_signature):
-                input_dict[tensor_spec.name] = args[ind]
-            outputs = keras_model(input_dict)
+            if isinstance(keras_input_signature, list):
+                outputs = keras_model(args)
+            else:
+                input_dict = {}
+                for ind, tensor_spec in enumerate(tf_input_signature):
+                    input_dict[tensor_spec.name] = args[ind]
+                outputs = keras_model(input_dict)
             # need to wrap the output into dictionary
             # it helps to preserve original keras tensor names
             post_outputs = {}
@@ -276,7 +297,7 @@ def trace_tf_model(model, input_shapes, input_types, example_input):
                                           "Could not trace the TF model with the following error: {}",
                                           use_example_input=False)
         else:
-            input_spec = get_input_spec_from_model(model)
+            input_spec = get_input_spec_from_model(model, input_shapes)
             concrete_func = get_concrete_func(tf_function, input_spec, input_needs_packing,
                                               "Could not trace the TF model with the following error: {}.\n"
                                               "Please provide 'example_input'.")
@@ -457,4 +478,4 @@ def tf_type_to_ov_type(val):
     }
     if val not in tf_to_ov_type:
         raise Exception("The provided data type is not supported by OpenVino {}.".format(val))
-    return tf_to_ov_type[val]
\ No newline at end of file
+    return tf_to_ov_type[val]
intel_npu_acceleration_library/external/openvino/helpers/packing.py

@@ -20,10 +20,10 @@ def pack_data(array: np.ndarray, type: Type) -> np.ndarray:
 
     :param array: numpy array with values to pack.
     :type array: numpy array
-    :param type: Type to interpret the array values. Type must be u1, u4, i4 or nf4.
+    :param type: Type to interpret the array values. Type must be u1, u4, i4, nf4 or f4e2m1.
     :type type: openvino.runtime.Type
     """
-    assert type in [Type.u1, Type.u4, Type.i4, Type.nf4], "Packing algorithm for the" "data types stored in 1, 2 or 4 bits"
+    assert type in [Type.u1, Type.u4, Type.i4, Type.nf4, Type.f4e2m1], "Packing algorithm for the" "data types stored in 1, 2 or 4 bits"
 
     minimum_regular_dtype = np.int8 if type == Type.i4 else np.uint8
     casted_to_regular_type = array.astype(dtype=minimum_regular_dtype, casting="unsafe")
@@ -57,12 +57,12 @@ def unpack_data(array: np.ndarray, type: Type, shape: Union[list, Shape]) -> np.ndarray:
 
     :param array: numpy array to unpack.
     :type array: numpy array
-    :param type: Type to extract from array values. Type must be u1, u4, i4 or nf4.
+    :param type: Type to extract from array values. Type must be u1, u4, i4, nf4 or f4e2m1.
     :type type: openvino.runtime.Type
     :param shape: the new shape for the unpacked array.
     :type shape: Union[list, openvino.runtime.Shape]
     """
-    assert type in [Type.u1, Type.u4, Type.i4, Type.nf4], "Unpacking algorithm for the" "data types stored in 1, 2 or 4 bits"
+    assert type in [Type.u1, Type.u4, Type.i4, Type.nf4, Type.f4e2m1], "Unpacking algorithm for the" "data types stored in 1, 2 or 4 bits"
     unpacked = np.unpackbits(array.view(np.uint8))
     shape = list(shape)
     if type.bitwidth == 1:
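
Note: pack_data and unpack_data, extended above to accept f4e2m1, store sub-byte element types two values per byte for the 4-bit cases. A minimal numpy round trip showing the idea (independent of the OpenVINO helpers; the low-nibble-first ordering is an assumption for this demo):

import numpy as np

def pack_u4(values):
    # Pack pairs of 4-bit values into bytes: low nibble first, high nibble second.
    v = values.astype(np.uint8) & 0x0F
    if v.size % 2:                      # pad odd-length input with a zero nibble
        v = np.append(v, np.uint8(0))
    return (v[0::2] | (v[1::2] << 4)).astype(np.uint8)

def unpack_u4(packed, count):
    low = packed & 0x0F
    high = packed >> 4
    # Interleave low/high nibbles back into the original order, drop padding.
    return np.stack([low, high], axis=1).reshape(-1)[:count]

vals = np.array([1, 15, 7, 9, 3], dtype=np.uint8)
packed = pack_u4(vals)                  # 5 nibbles fit in 3 bytes
assert np.array_equal(unpack_u4(packed, len(vals)), vals)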
intel_npu_acceleration_library/external/openvino/preprocess/__init__.py

@@ -24,3 +24,5 @@ from openvino._pyopenvino.preprocess import PreProcessSteps
 from openvino._pyopenvino.preprocess import PostProcessSteps
 from openvino._pyopenvino.preprocess import ColorFormat
 from openvino._pyopenvino.preprocess import ResizeAlgorithm
+from openvino._pyopenvino.preprocess import PaddingMode
+