bigdl-core-npu 2.5.0-cp311-cp311-win_amd64.whl → 2.6.0-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bigdl-core-npu/__init__.py +0 -0
- bigdl-core-npu/common.lib +0 -0
- bigdl-core-npu/ggml.dll +0 -0
- bigdl-core-npu/ggml.lib +0 -0
- bigdl-core-npu/include/llamacpp/arg.h +77 -0
- bigdl-core-npu/include/llamacpp/common.h +563 -0
- bigdl-core-npu/include/llamacpp/ggml-alloc.h +76 -0
- bigdl-core-npu/include/llamacpp/ggml-backend.h +241 -0
- bigdl-core-npu/include/llamacpp/ggml.h +2679 -0
- bigdl-core-npu/include/llamacpp/llama.h +1234 -0
- bigdl-core-npu/include/llamacpp/log.h +92 -0
- bigdl-core-npu/include/npu/npu_common.h +119 -0
- bigdl-core-npu/include/npu/npu_llm.h +77 -0
- bigdl-core-npu/llama-cli-npu.exe +0 -0
- bigdl-core-npu/llama.dll +0 -0
- bigdl-core-npu/llama.lib +0 -0
- bigdl-core-npu/llm-cli.exe +0 -0
- bigdl-core-npu/npu_llm.dll +0 -0
- bigdl-core-npu/npu_llm.lib +0 -0
- bigdl-core-npu/zlib1.dll +0 -0
- bigdl_core_npu-2.6.0.data/scripts/init-llama-cpp.bat +29 -0
- {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/METADATA +12 -3
- {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/RECORD +146 -96
- {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/WHEEL +1 -1
- {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/top_level.txt +1 -0
- intel_npu_acceleration_library/_version.py +1 -1
- intel_npu_acceleration_library/backend/base.py +39 -4
- intel_npu_acceleration_library/backend/bindings.py +109 -5
- intel_npu_acceleration_library/backend/factory.py +264 -47
- intel_npu_acceleration_library/backend/ops.py +2 -1
- intel_npu_acceleration_library/backend/qlinear.py +8 -4
- intel_npu_acceleration_library/backend/runtime.py +7 -2
- intel_npu_acceleration_library/backend/tensor.py +73 -3
- intel_npu_acceleration_library/bigdl-core-npu/cache.json +113732 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_auto_batch_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_auto_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_c.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_hetero_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_intel_cpu_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_intel_gpu_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_intel_npu_plugin.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_ir_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_onnx_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_paddle_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_pytorch_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_tensorflow_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/openvino_tensorflow_lite_frontend.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbb12.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbb12_debug.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbbind_2_5.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbbind_2_5_debug.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc_debug.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc_proxy.dll +0 -0
- intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc_proxy_debug.dll +0 -0
- intel_npu_acceleration_library/device.py +2 -2
- intel_npu_acceleration_library/dtypes.py +34 -1
- intel_npu_acceleration_library/external/openvino/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/experimental/__init__.py +14 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +293 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/passes.py +65 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +182 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +37 -19
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +47 -6
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +28 -8
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +17 -5
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +1 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +55 -47
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +95 -63
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +12 -10
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +31 -10
- intel_npu_acceleration_library/external/openvino/helpers/packing.py +4 -4
- intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +2 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +1 -0
- intel_npu_acceleration_library/external/openvino/properties/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +1 -1
- intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +2 -1
- intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +5 -6
- intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +7 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +193 -2
- intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +69 -43
- intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +4 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +2 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +21 -3
- intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +88 -2
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +9 -9
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +16 -2
- intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +5 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +68 -16
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +69 -60
- intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +90 -3
- intel_npu_acceleration_library/external/openvino/utils.py +17 -0
- intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
- intel_npu_acceleration_library/nn/module.py +17 -17
intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py
CHANGED
@@ -15,4 +15,5 @@ from openvino._pyopenvino.op import Parameter
 from openvino._pyopenvino.op import if_op
 from openvino._pyopenvino.op import loop
 from openvino._pyopenvino.op import tensor_iterator
+from openvino._pyopenvino.op import read_value
 from openvino._pyopenvino.op import Result
intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py
CHANGED
@@ -31,7 +31,7 @@ from openvino.runtime.utils.types import (
     get_element_type_str,
     make_constant_node,
 )
-
+from openvino.utils import deprecated
 
 _get_node_factory_opset1 = partial(_get_node_factory, "opset1")
 
@@ -1532,6 +1532,7 @@ def lstm_cell(
     return _get_node_factory_opset1().create("LSTMCell", node_inputs, attributes)
 
 
+@deprecated(version="2025.0", message="Use lstm_sequence from opset 5")
 @nameable_op
 def lstm_sequence(
     X: NodeInput,
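The hunk above pulls `deprecated` from `openvino.utils` and applies it to opset1's `lstm_sequence`. A minimal sketch of what that means for callers, assuming the decorator warns via the standard `warnings` module and then calls the wrapped function; `old_helper` below is a toy stand-in, not part of the package:

```python
import warnings

from openvino.utils import deprecated  # same helper imported by the hunk above


@deprecated(version="2025.0", message="use new_helper() instead")  # illustrative function
def old_helper() -> int:
    return 42


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    value = old_helper()

print(value)                             # 42: the wrapped function still runs
print([str(w.message) for w in caught])  # expected to mention the 2025.0 deprecation
```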
intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py
CHANGED
@@ -180,8 +180,7 @@ def multinomial(
     inputs = as_nodes(probs, num_samples, name=name)
 
     if global_seed < 0:
-        raise RuntimeError(
-            f"global_seed should be positive or 0. Got: {global_seed}")
+        raise RuntimeError(f"global_seed should be positive or 0. Got: {global_seed}")
 
     if op_seed < 0:
         raise RuntimeError(f"op_seed should be positive or 0. Got: {op_seed}")
@@ -223,8 +222,7 @@ def nms_rotated(
     :param clockwise: Flag that specifies direction of the box rotation.
     :return: The new node which performs NMSRotated
     """
-    inputs = as_nodes(boxes, scores, max_output_boxes_per_class,
-                      iou_threshold, score_threshold, name=name)
+    inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, name=name)
 
     attributes = {
         "sort_result_descending": sort_result_descending,
@@ -301,6 +299,7 @@ def constant(
                   - dtype force conversion of data.
     :return: The Constant node initialized with provided data.
     """
+
     def display_shared_memory_warning(warning_message: str) -> None:
         if shared_memory:
             log.warning(f"{warning_message}. Memory sharing is disabled by default. Set shared_memory=False to hide this warning.")
@@ -313,10 +312,10 @@ def constant(
     # Handle type casting, when dtype is not None:
     if dtype:
         # Expect packed data, use different constructor to handle it correctly:
-        if dtype in [Type.u1, Type.i4, Type.u4, Type.nf4]:
+        if dtype in [Type.u1, Type.i4, Type.u4, Type.nf4, Type.f4e2m1]:
             display_shared_memory_warning(f"Constant initialized with packed type of {dtype}")
             return Constant(dtype, Shape(_value.shape), _value.flatten().tolist())
-        elif dtype in [Type.bf16]:
+        elif dtype in [Type.bf16, Type.f8e8m0, Type.f8e4m3, Type.f8e5m2]:
             display_shared_memory_warning(f"Constant initialized with OpenVINO custom {dtype}")
             return Constant(dtype, Shape(_value.shape), _value.flatten().tolist())
     # General use-case for all other types:
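The two `constant` hunks above extend the packed branch with `Type.f4e2m1` and the custom branch with the float8 types. A minimal usage sketch, assuming the bundled OpenVINO runtime (2024.4-level) actually exposes these element types and accepts them in this constructor path:

```python
import numpy as np
from openvino.runtime import Type
import openvino.runtime.opset13 as ops

data = np.array([0.5, -1.0, 2.0, 0.25], dtype=np.float32)

# Packed 4-bit float: handled by the same branch as u1/i4/u4/nf4 after this change.
packed = ops.constant(data, dtype=Type.f4e2m1)

# Custom float8 type: handled by the same branch as bf16 after this change.
f8 = ops.constant(data, dtype=Type.f8e4m3)

print(packed.get_element_type(), f8.get_element_type())
```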
intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py
CHANGED
@@ -7,4 +7,11 @@
 # TODO (ticket 138273): Add previous opset operators at the end of opset15 development
 from openvino.runtime.opset1.ops import parameter
 from openvino.runtime.opset15.ops import col2im
+from openvino.runtime.opset15.ops import embedding_bag_offsets
+from openvino.runtime.opset15.ops import embedding_bag_packed
 from openvino.runtime.opset15.ops import scatter_nd_update
+from openvino.runtime.opset15.ops import roi_align_rotated
+from openvino.runtime.opset15.ops import string_tensor_pack
+from openvino.runtime.opset15.ops import string_tensor_unpack
+from openvino.runtime.opset15.ops import bitwise_left_shift
+from openvino.runtime.opset15.ops import bitwise_right_shift
intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py
CHANGED
@@ -4,11 +4,14 @@
 
 """Factory functions for ops added to openvino opset15."""
 from functools import partial
-from typing import
+from typing import List, Literal, Optional
 
+import numpy as np
 from openvino.runtime import Node, Type
+from openvino.runtime.opset1 import convert_like
+from openvino.runtime.opset14 import constant
 from openvino.runtime.opset_utils import _get_node_factory
-from openvino.runtime.utils.decorators import nameable_op
+from openvino.runtime.utils.decorators import binary_op, nameable_op
 from openvino.runtime.utils.types import NodeInput, as_nodes
 
 _get_node_factory_opset15 = partial(_get_node_factory, "opset15")
@@ -83,3 +86,191 @@ def col2im(
             "pads_end": pads_end,
         },
     )
+
+
+@nameable_op
+def embedding_bag_offsets(
+    emb_table: NodeInput,
+    indices: NodeInput,
+    offsets: NodeInput,
+    default_index: Optional[NodeInput] = None,
+    per_sample_weights: Optional[NodeInput] = None,
+    reduction: Literal["sum", "mean"] = "sum",
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which performs sums or means of bags of embeddings without the intermediate embeddings.
+
+    :param emb_table: Tensor containing the embedding lookup table.
+    :param indices: 1D Tensor with indices.
+    :param offsets: 1D Tensor containing the starting index positions of each bag in indices.
+    :param per_sample_weights: Tensor with weights for each sample.
+    :param default_index: Scalar containing default index in embedding table to fill empty bags.
+                          If unset or set to -1, empty bags will be filled with 0.
+                          Reverse indexing using negative indices is not supported.
+    :param reduction: String to select algorithm used to perform reduction of elements in bag.
+    :param name: Optional name for output node.
+    :return: The new node performing EmbeddingBagOffsets operation.
+    """
+    inputs = [emb_table, indices, offsets]
+    if default_index is not None:
+        inputs.append(default_index)
+    elif per_sample_weights is not None:
+        inputs.append(convert_like(constant(np.array(-1, np.int32)), inputs[1]))
+    if per_sample_weights is not None:
+        inputs.append(per_sample_weights)
+
+    return _get_node_factory_opset15().create("EmbeddingBagOffsets", as_nodes(*inputs, name=name), {"reduction": reduction})
+
+
+@nameable_op
+def embedding_bag_packed(
+    emb_table: NodeInput,
+    indices: NodeInput,
+    per_sample_weights: Optional[NodeInput] = None,
+    reduction: Literal["sum", "mean"] = "sum",
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which performs sums or means of "bags" of embeddings, without the intermediate embeddings.
+
+    :param emb_table: Tensor containing the embedding lookup table.
+    :param indices: 2D Tensor of shape [batch, indices_per_bag] with indices.
+    :param per_sample_weights: Tensor of weights to be multiplied with embedding table with same shape as indices.
+    :param reduction: Operator to perform reduction of elements in bag.
+    :param name: Optional name for output node.
+    :return: The new node performing EmbeddingBagPacked operation.
+    """
+    inputs = [emb_table, indices]
+    if per_sample_weights is not None:
+        inputs.append(per_sample_weights)
+
+    return _get_node_factory_opset15().create("EmbeddingBagPacked", as_nodes(*inputs, name=name), {"reduction": reduction})
+
+
+@nameable_op
+def roi_align_rotated(
+    data: NodeInput,
+    rois: NodeInput,
+    batch_indices: NodeInput,
+    pooled_h: int,
+    pooled_w: int,
+    sampling_ratio: int,
+    spatial_scale: float,
+    clockwise_mode: bool,
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which performs ROIAlignRotated operation.
+
+    :param data: Input data.
+    :param rois: RoIs (Regions of Interest) to pool over.
+    :param batch_indices: Tensor with each element denoting the index of
+                          the corresponding image in the batch.
+    :param pooled_h: Height of the ROI output feature map.
+    :param pooled_w: Width of the ROI output feature map.
+    :param sampling_ratio: Number of bins over height and width to use to calculate
+                           each output feature map element.
+    :param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates.
+    :param clockwise_mode: If true, rotation angle is interpreted as clockwise,
+                           otherwise as counterclockwise
+    :param name: The optional name for the output node
+
+    :return: The new node which performs ROIAlignRotated
+    """
+    return _get_node_factory_opset15().create(
+        "ROIAlignRotated",
+        as_nodes(data, rois, batch_indices, name=name),
+        {
+            "pooled_h": pooled_h,
+            "pooled_w": pooled_w,
+            "sampling_ratio": sampling_ratio,
+            "spatial_scale": spatial_scale,
+            "clockwise_mode": clockwise_mode,
+        },
+    )
+
+
+@nameable_op
+def string_tensor_unpack(
+    data: NodeInput,
+    name: Optional[str] = None,
+) -> Node:
+    """Perform an operation which unpacks a batch of strings into three tensors.
+
+    :param data: The node providing input data.
+
+    :return: The new node performing StringTensorUnpack operation.
+    """
+    return _get_node_factory_opset15().create(
+        "StringTensorUnpack",
+        as_nodes(data, name=name)
+    )
+
+
+@nameable_op
+def string_tensor_pack(
+    begins: NodeInput,
+    ends: NodeInput,
+    symbols: NodeInput,
+    name: Optional[str] = None,
+) -> Node:
+    """Perform an operation which packs a concatenated batch of strings into a batched string tensor.
+
+    :param begins: ND tensor of non-negative integer numbers containing indices of each string's beginnings.
+    :param ends: ND tensor of non-negative integer numbers containing indices of each string's endings.
+    :param symbols: 1D tensor of concatenated strings data encoded in utf-8 bytes.
+
+    :return: The new node performing StringTensorPack operation.
+    """
+    return _get_node_factory_opset15().create(
+        "StringTensorPack",
+        as_nodes(begins, ends, symbols, name=name)
+    )
+
+
+@binary_op
+def bitwise_left_shift(
+    arg0: NodeInput,
+    arg1: NodeInput,
+    auto_broadcast: str = "NUMPY",
+    name: Optional[str] = None,
+) -> Node:
+    """Return node which performs BitwiseLeftShift operation on input nodes element-wise.
+
+    :param arg0: Node with data to be shifted.
+    :param arg1: Node with number of shifts.
+    :param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors.
+                           Defaults to "NUMPY".
+
+    :return: The new node performing BitwiseLeftShift operation.
+    """
+    return _get_node_factory_opset15().create(
+        "BitwiseLeftShift",
+        as_nodes(arg0, arg1, name=name),
+        {
+            "auto_broadcast": auto_broadcast.upper(),
+        },
+    )
+
+
+@binary_op
+def bitwise_right_shift(
+    arg0: NodeInput,
+    arg1: NodeInput,
+    auto_broadcast: str = "NUMPY",
+    name: Optional[str] = None,
+) -> Node:
+    """Return node which performs BitwiseRightShift operation on input nodes element-wise.
+
+    :param arg0: Tensor with data to be shifted.
+    :param arg1: Tensor with number of shifts.
+    :param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors.
+                           Defaults to "NUMPY".
+
+    :return: The new node performing BitwiseRightShift operation.
+    """
+    return _get_node_factory_opset15().create(
+        "BitwiseRightShift",
+        as_nodes(arg0, arg1, name=name),
+        {
+            "auto_broadcast": auto_broadcast.upper(),
+        },
+    )
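A short usage sketch of two of the factories added above, `bitwise_left_shift` and `embedding_bag_offsets`; it assumes the bundled OpenVINO build ships `openvino.runtime.opset15` with these exports (per the `__init__.py` hunk earlier), and the shapes are illustrative:

```python
import numpy as np
import openvino.runtime.opset15 as ops15

# Element-wise shift: numpy inputs are wrapped into Constant nodes by as_nodes.
shifted = ops15.bitwise_left_shift(np.array([1, 2, 4], dtype=np.int32),
                                   np.array([1, 1, 1], dtype=np.int32))
print(shifted.get_type_name())  # BitwiseLeftShift

# Bag-of-embeddings lookup with explicit offsets and mean reduction.
table = np.random.rand(10, 4).astype(np.float32)
indices = np.array([0, 2, 3, 4], dtype=np.int32)
offsets = np.array([0, 2], dtype=np.int32)
emb = ops15.embedding_bag_offsets(table, indices, offsets, reduction="mean")
print(emb.get_output_partial_shape(0))  # expected [2,4]
```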
intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py
CHANGED
@@ -2,35 +2,26 @@
 # Copyright (C) 2018-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
+import numpy as np
+
 """Factory functions for all openvino ops."""
-from typing import
+from typing import Optional, Union
 
-import numpy as np
 from functools import partial, singledispatch
 
-from openvino.runtime import Node,
+from openvino.runtime import Node, Type, PartialShape, Output, Shape
 from openvino.runtime.op import assign, Constant, Parameter
+from openvino.runtime.op import read_value as _read_value
+from openvino.runtime.op.util import VariableInfo, Variable
 from openvino.runtime.opset_utils import _get_node_factory
-from openvino.runtime.utils.decorators import
-from openvino.runtime.utils.input_validation import (
-    assert_list_of_ints,
-    check_valid_attributes,
-    is_non_negative_value,
-    is_positive_value,
-)
-from openvino.runtime.utils.node_factory import NodeFactory
+from openvino.runtime.utils.decorators import nameable_op, overloading
 from openvino.runtime.utils.types import (
     NodeInput,
-    NumericData,
     NumericType,
-    ScalarData,
     TensorShape,
     as_node,
     as_nodes,
-    get_dtype,
     get_element_type,
-    get_element_type_str,
-    make_constant_node,
 )
 
 _get_node_factory_opset6 = partial(_get_node_factory, "opset6")
@@ -124,12 +115,13 @@ def mvn(
     return _get_node_factory_opset6().create("MVN", inputs, attributes)
 
 
-@
+@overloading(Union[Node, Output, int, float, np.ndarray], str, Optional[Union[type, np.dtype, Type, str]],
+             Optional[Union[TensorShape, Shape, PartialShape]], Optional[str])
 @nameable_op
-def read_value(init_value:
+def read_value(init_value: Union[Node, Output, int, float, np.ndarray],
                variable_id: str,
-               variable_type: Optional[Union[
-               variable_shape: Optional[TensorShape] = None,
+               variable_type: Optional[Union[type, np.dtype, Type, str]] = None,
+               variable_shape: Optional[Union[TensorShape, Shape, PartialShape]] = None,
                name: Optional[str] = None) -> Node:
     """Return a node which produces the Assign operation.
 
@@ -140,29 +132,32 @@ def read_value(init_value: NodeInput,
     :param name: Optional name for output node.
     :return: ReadValue node
     """
-
+    info = VariableInfo()
+    info.variable_id = variable_id
 
     if variable_type is not None:
         if not isinstance(variable_type, Type) and not isinstance(variable_type, str):
-
+            info.data_type = get_element_type(variable_type)
         else:
-
+            info.data_type = variable_type
+    else:
+        info.data_type = Type.dynamic
 
     if variable_shape is not None:
-
+        info.data_shape = PartialShape(variable_shape)
+    else:
+        info.data_shape = PartialShape.dynamic()
 
-
-
-        [as_node(init_value, name=name)],
-        attr_map,
-    )
+    var_from_info = Variable(info)
+    return _read_value(new_value=as_node(init_value, name=name), variable=var_from_info)
 
 
-@
-
-
-
-
+@overloading(str, Optional[Union[type, np.dtype, Type, str]], Optional[Union[TensorShape, Shape, PartialShape]], Optional[str])  # type: ignore
+@nameable_op
+def read_value(variable_id: str,  # noqa: F811
+               variable_type: Optional[Union[type, np.dtype, Type, str]] = None,
+               variable_shape: Optional[Union[TensorShape, Shape, PartialShape]] = None,
+               name: Optional[str] = None) -> Node:
     """Return a node which produces the Assign operation.
 
     :param variable_id: Id of a variable to be read.
@@ -171,19 +166,50 @@ def _(variable_id: str,
     :param name: Optional name for output node.
     :return: ReadValue node
     """
-
+    info = VariableInfo()
+    info.variable_id = variable_id
 
     if variable_type is not None:
         if not isinstance(variable_type, Type) and not isinstance(variable_type, str):
-
+            info.data_type = get_element_type(variable_type)
         else:
-
+            info.data_type = variable_type
+    else:
+        info.data_type = Type.dynamic
 
     if variable_shape is not None:
-
+        info.data_shape = PartialShape(variable_shape)
+    else:
+        info.data_shape = PartialShape.dynamic()
+
+    var_from_info = Variable(info)
+
+    return _read_value(var_from_info)
 
-
-
-
-
-
+
+@overloading(Variable, Optional[str])  # type: ignore
+@nameable_op
+def read_value(ov_variable: Variable,  # noqa: F811
+               name: Optional[str] = None) -> Node:
+    """Return a node which produces the Assign operation.
+
+    :param ov_variable: Variable to be read.
+    :param name: Optional name for output node.
+    :return: ReadValue node
+    """
+    return _read_value(ov_variable)
+
+
+@overloading(Union[Node, Output, int, float, np.ndarray], Variable, Optional[str])  # type: ignore
+@nameable_op
+def read_value(init_value: Union[Node, Output, int, float, np.ndarray],  # noqa: F811
+               ov_variable: Variable,
+               name: Optional[str] = None) -> Node:
+    """Return a node which produces the Assign operation.
+
+    :param init_value: Optional node producing a value to be returned instead of an unassigned variable.
+    :param ov_variable: Variable to be read.
+    :param name: Optional name for output node.
+    :return: ReadValue node
+    """
+    return _read_value(as_node(init_value, name=name), ov_variable)
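The three hunks above replace the old node-factory implementation of opset6 `read_value` with a set of `@overloading` variants backed by `VariableInfo`/`Variable`. A minimal sketch of two of the overloads, assuming the bundled OpenVINO Python API is importable as `openvino`; variable ids and shapes are illustrative:

```python
import numpy as np
from openvino.runtime import PartialShape, Type
from openvino.runtime.op.util import Variable, VariableInfo
import openvino.runtime.opset6 as ops6

# Overload taking a variable id plus element type and shape:
# the VariableInfo is assembled internally, as in the hunk above.
rv = ops6.read_value("state_1", variable_type=Type.f32, variable_shape=[1, 4])

# Overload taking an initial value and a pre-built Variable.
info = VariableInfo()
info.variable_id = "state_2"
info.data_type = Type.f32
info.data_shape = PartialShape([1, 4])
rv2 = ops6.read_value(np.zeros((1, 4), dtype=np.float32), Variable(info))

print(rv.get_type_name(), rv2.get_type_name())  # both ReadValue
```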
intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py
CHANGED
@@ -336,6 +336,7 @@ def random_uniform(
     output_type: str,
     global_seed: int = 0,
     op_seed: int = 0,
+    alignment: str = "tensorflow",
     name: Optional[str] = None,
 ) -> Node:
     """Return a node which generates sequence of random values from uniform distribution.
@@ -347,6 +348,8 @@ def random_uniform(
                         'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'.
     :param global_seed: Specifies global seed value. Required to be a positive integer or 0.
     :param op_seed: Specifies operational seed value. Required to be a positive integer or 0.
+    :param alignment: Specifies alignment of the randomly generated numbers to a given framework.
+                      Possible values: 'tensorflow', 'pytorch'. Default is 'tensorflow'.
     :param name: Optional output node name.
 
     :return: The new node which performs generation of random values from uniform distribution.
@@ -363,6 +366,7 @@ def random_uniform(
         "output_type": output_type,
         "global_seed": global_seed,
         "op_seed": op_seed,
+        "alignment": alignment.lower(),
     }
     return _get_node_factory_opset8().create("RandomUniform", inputs, attributes)
 
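The three `random_uniform` hunks above add an `alignment` attribute to the opset8 factory. A usage sketch, assuming the bundled runtime accepts the new attribute; shapes and seeds are arbitrary:

```python
import numpy as np
import openvino.runtime.opset8 as ops8

out_shape = ops8.constant(np.array([2, 3], dtype=np.int64))
low = ops8.constant(np.array(0.0, dtype=np.float32))
high = ops8.constant(np.array(1.0, dtype=np.float32))

# Ask for numbers that match PyTorch's generator instead of the default
# TensorFlow alignment.
node = ops8.random_uniform(out_shape, low, high, output_type="f32",
                           global_seed=42, op_seed=7, alignment="pytorch")
print(node.get_type_name())  # RandomUniform
```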
intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py
CHANGED
@@ -5,6 +5,7 @@
 # Enums
 from openvino._pyopenvino.properties import Affinity
 from openvino._pyopenvino.properties import CacheMode
+from openvino._pyopenvino.properties import WorkloadType
 
 # Properties
 from openvino._pyopenvino.properties import enable_profiling
@@ -27,6 +28,7 @@ from openvino._pyopenvino.properties import max_batch_size
 from openvino._pyopenvino.properties import range_for_async_infer_requests
 from openvino._pyopenvino.properties import execution_devices
 from openvino._pyopenvino.properties import loaded_from_cache
+from openvino._pyopenvino.properties import cache_encryption_callbacks
 
 # Submodules
 from openvino.runtime.properties import hint
intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py
CHANGED
@@ -7,7 +7,7 @@ from typing import Any, Dict, Union, Optional
 
 import numpy as np
 
-from openvino._pyopenvino import ConstOutput, Tensor, Type
+from openvino._pyopenvino import ConstOutput, Tensor, Type, RemoteTensor
 from openvino.runtime.utils.data_helpers.wrappers import _InferRequestWrapper, OVDict
 
 ContainerTypes = Union[dict, list, tuple, OVDict]
@@ -61,6 +61,16 @@ def _(
     return value
 
 
+@value_to_tensor.register(RemoteTensor)
+def _(
+    value: RemoteTensor,
+    request: Optional[_InferRequestWrapper] = None,
+    is_shared: bool = False,
+    key: Optional[ValidKeys] = None,
+) -> RemoteTensor:
+    return value
+
+
 @value_to_tensor.register(np.ndarray)
 def _(
     value: np.ndarray,
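The hunk above registers a pass-through for `RemoteTensor` on the single-dispatch `value_to_tensor` helper, so device-side tensors are handed to the request as-is. A standalone sketch of the pattern with a placeholder type (`FakeRemoteTensor` and `to_host_tensor` are illustrative, not part of the package):

```python
from functools import singledispatch
from typing import Any

import numpy as np


class FakeRemoteTensor:  # stand-in for an already-materialized device tensor
    pass


@singledispatch
def to_host_tensor(value: Any) -> Any:
    # Default path: host data is converted to a contiguous ndarray.
    return np.ascontiguousarray(np.asarray(value))


@to_host_tensor.register(FakeRemoteTensor)
def _(value: FakeRemoteTensor) -> FakeRemoteTensor:
    return value  # pass-through, no host copy


print(type(to_host_tensor([1, 2, 3])).__name__)           # ndarray
print(type(to_host_tensor(FakeRemoteTensor())).__name__)  # FakeRemoteTensor
```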
@@ -137,7 +147,11 @@ def _(
 def to_c_style(value: Any, is_shared: bool = False) -> Any:
     if not isinstance(value, np.ndarray):
         if hasattr(value, "__array__"):
-
+            if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
+                # https://numpy.org/devdocs/numpy_2_0_migration_guide.html#adapting-to-changes-in-the-copy-keyword
+                return to_c_style(np.asarray(value), is_shared) if is_shared else np.asarray(value, copy=True)  # type: ignore
+            else:
+                return to_c_style(np.array(value, copy=False), is_shared) if is_shared else np.array(value, copy=True)
         return value
     return value if value.flags["C_CONTIGUOUS"] else np.ascontiguousarray(value)
 
@@ -152,7 +166,11 @@ def normalize_arrays(
 ) -> Any:
     # Check the special case of the array-interface
     if hasattr(inputs, "__array__"):
-
+        if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
+            # https://numpy.org/devdocs/numpy_2_0_migration_guide.html#adapting-to-changes-in-the-copy-keyword
+            return to_c_style(np.asarray(inputs), is_shared) if is_shared else np.asarray(inputs, copy=True)  # type: ignore
+        else:
+            return to_c_style(np.array(inputs, copy=False), is_shared) if is_shared else np.array(inputs, copy=True)
     # Error should be raised if type does not match any dispatchers
     raise TypeError(f"Incompatible inputs of type: {type(inputs)}")
 
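The two hunks above gate the copy behaviour on the installed NumPy version: under NumPy 2.x, `np.array(obj, copy=False)` raises when a copy cannot be avoided, so the no-copy path has to be spelled with `np.asarray`. A standalone sketch of the same gate (helper names are illustrative):

```python
import numpy as np


def as_ndarray_no_copy(obj):
    """Return an ndarray view of obj, copying only when unavoidable."""
    if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
        return np.asarray(obj)        # NumPy 2.x: copy=False would raise if a copy is needed
    return np.array(obj, copy=False)  # NumPy 1.x spelling of the same request


def as_ndarray_copy(obj):
    """Always return a fresh ndarray copy of obj."""
    if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
        return np.asarray(obj, copy=True)
    return np.array(obj, copy=True)


view = as_ndarray_no_copy(memoryview(b"\x01\x02\x03"))
print(view.base is not None)            # True: memory is shared where possible
print(as_ndarray_copy(bytearray(b"abc")))
```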
intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py
CHANGED
@@ -3,8 +3,8 @@
 # SPDX-License-Identifier: Apache-2.0
 
 from functools import wraps
-from inspect import
-from typing import Any, Callable,
+from inspect import signature
+from typing import Any, Callable, Dict, Optional, Union, get_origin, get_args
 
 from openvino.runtime import Node, Output
 from openvino.runtime.utils.types import NodeInput, as_node, as_nodes
@@ -68,3 +68,89 @@ def custom_preprocess_function(custom_function: Callable) -> Callable:
         return Output._from_node(custom_function(node))
 
     return wrapper
+
+
+class MultiMethod(object):
+    def __init__(self, name: str):
+        self.name = name
+        self.typemap: Dict[tuple, Callable] = {}
+
+    # Checks if actual_type is a subclass of any type in the union
+    def matches_union(self, union_type, actual_type) -> bool:  # type: ignore
+        for type_arg in get_args(union_type):
+            if isinstance(type_arg, type) and issubclass(actual_type, type_arg):
+                return True
+            elif get_origin(type_arg) == list:
+                if issubclass(actual_type, list):
+                    return True
+        return False
+
+    def matches_optional(self, optional_type, actual_type) -> bool:  # type: ignore
+        return actual_type is None or self.matches_union(optional_type, actual_type)
+
+    # Checks whether there is overloading which matches invoked argument types
+    def check_invoked_types_in_overloaded_funcs(self, tuple_to_check: tuple, key_structure: tuple) -> bool:
+        for actual_type, expected_type in zip(tuple_to_check, key_structure):
+            origin = get_origin(expected_type)
+            if origin is Union:
+                if not self.matches_union(expected_type, actual_type):
+                    return False
+            elif origin is Optional:
+                if not self.matches_optional(expected_type, actual_type):
+                    return False
+            elif not issubclass(actual_type, expected_type):
+                return False
+        return True
+
+    def __call__(self, *args, **kwargs) -> Any:  # type: ignore
+        arg_types = tuple(arg.__class__ for arg in args)
+        kwarg_types = {key: type(value) for key, value in kwargs.items()}
+
+        key_matched = None
+        if len(kwarg_types) == 0 and len(arg_types) != 0:
+            for key in self.typemap.keys():
+                # compare types of called function with overloads
+                if self.check_invoked_types_in_overloaded_funcs(arg_types, key):
+                    key_matched = key
+                    break
+        elif len(arg_types) == 0 and len(kwarg_types) != 0:
+            for key, func in self.typemap.items():
+                func_signature = {arg_name: types.annotation for arg_name, types in signature(func).parameters.items()}
+                # if kwargs of called function are subset of overloaded function, we use this overload
+                if kwarg_types.keys() <= func_signature.keys():
+                    key_matched = key
+                    break
+        elif len(arg_types) != 0 and len(kwarg_types) != 0:
+            for key, func in self.typemap.items():
+                func_signature = {arg_name: types.annotation for arg_name, types in signature(func).parameters.items()}
+                # compare types of called function with overloads
+                if self.check_invoked_types_in_overloaded_funcs(arg_types, tuple(func_signature.values())):
+                    # if kwargs of called function are subset of overloaded function, we use this overload
+                    if kwarg_types.keys() <= func_signature.keys():
+                        key_matched = key
+                        break
+
+        if key_matched is None:
+            raise TypeError(f"The necessary overload for {self.name} was not found")
+
+        function = self.typemap.get(key_matched)
+        return function(*args, **kwargs)  # type: ignore
+
+    def register(self, types: tuple, function: Callable) -> None:
+        if types in self.typemap:
+            raise TypeError("duplicate registration")
+        self.typemap[types] = function
+
+
+registry: Dict[str, MultiMethod] = {}
+
+
+def overloading(*types: tuple) -> Callable:
+    def register(function: Callable) -> MultiMethod:
+        name = function.__name__
+        mm = registry.get(name)
+        if mm is None:
+            mm = registry[name] = MultiMethod(name)
+        mm.register(types, function)
+        return mm
+    return register
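The `MultiMethod`/`overloading` machinery added above dispatches a call to the registered overload whose annotated parameter types match the invoked argument types; this is what lets the four `read_value` variants in the opset6 hunks coexist. A toy sketch of the decorator; `describe` and its overloads are illustrative only:

```python
from typing import Optional, Union

from openvino.runtime.utils.decorators import overloading


@overloading(Union[int, float], Optional[str])
def describe(value: Union[int, float], name: Optional[str] = None) -> str:
    return f"number {value}" + (f" ({name})" if name else "")


@overloading(str, Optional[str])  # type: ignore
def describe(value: str, name: Optional[str] = None) -> str:  # noqa: F811
    return f"string '{value}'" + (f" ({name})" if name else "")


print(describe(3.5))    # dispatches to the Union[int, float] overload
print(describe("npu"))  # dispatches to the str overload
```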