bigdl-core-npu 2.6.0b20241112__cp310-cp310-win_amd64.whl → 2.6.0b20241114__cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {bigdl_core_npu-2.6.0b20241112.dist-info → bigdl_core_npu-2.6.0b20241114.dist-info}/METADATA +1 -1
- {bigdl_core_npu-2.6.0b20241112.dist-info → bigdl_core_npu-2.6.0b20241114.dist-info}/RECORD +95 -85
- intel_npu_acceleration_library/_version.py +1 -1
- intel_npu_acceleration_library/backend/bindings.py +10 -0
- intel_npu_acceleration_library/backend/factory.py +2 -26
- intel_npu_acceleration_library/backend/tensor.py +69 -0
- intel_npu_acceleration_library/device.py +2 -2
- intel_npu_acceleration_library/dtypes.py +34 -1
- intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +283 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/py_jax_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/py_jax_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/py_jax_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/py_jax_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/py_jax_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +129 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +8 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +1 -1
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +28 -8
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +1 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +3 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/helpers/packing.py +4 -4
- intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +2 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +1 -0
- intel_npu_acceleration_library/external/openvino/properties/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +2 -1
- intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +5 -6
- intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +2 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +62 -1
- intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +60 -43
- intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +4 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +1 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +67 -1
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +9 -9
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +16 -2
- intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +5 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +68 -16
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +70 -60
- intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +90 -3
- intel_npu_acceleration_library/external/openvino/utils.py +17 -0
- intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_jax_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
- intel_npu_acceleration_library/nn/module.py +17 -17
- {bigdl_core_npu-2.6.0b20241112.dist-info → bigdl_core_npu-2.6.0b20241114.dist-info}/WHEEL +0 -0
- {bigdl_core_npu-2.6.0b20241112.dist-info → bigdl_core_npu-2.6.0b20241114.dist-info}/top_level.txt +0 -0
intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py

@@ -180,8 +180,7 @@ def multinomial(
     inputs = as_nodes(probs, num_samples, name=name)
 
     if global_seed < 0:
-        raise RuntimeError(
-            f"global_seed should be positive or 0. Got: {global_seed}")
+        raise RuntimeError(f"global_seed should be positive or 0. Got: {global_seed}")
 
     if op_seed < 0:
         raise RuntimeError(f"op_seed should be positive or 0. Got: {op_seed}")
@@ -223,8 +222,7 @@ def nms_rotated(
     :param clockwise: Flag that specifies direction of the box rotation.
     :return: The new node which performs NMSRotated
     """
-    inputs = as_nodes(boxes, scores, max_output_boxes_per_class,
-                      iou_threshold, score_threshold, name=name)
+    inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, name=name)
 
     attributes = {
         "sort_result_descending": sort_result_descending,
@@ -301,6 +299,7 @@ def constant(
     - dtype force conversion of data.
     :return: The Constant node initialized with provided data.
     """
+
     def display_shared_memory_warning(warning_message: str) -> None:
         if shared_memory:
             log.warning(f"{warning_message}. Memory sharing is disabled by default. Set shared_memory=False to hide this warning.")
@@ -313,10 +312,10 @@ def constant(
     # Handle type casting, when dtype is not None:
     if dtype:
         # Expect packed data, use different constructor to handle it correctly:
-        if dtype in [Type.u1, Type.i4, Type.u4, Type.nf4]:
+        if dtype in [Type.u1, Type.i4, Type.u4, Type.nf4, Type.f4e2m1]:
            display_shared_memory_warning(f"Constant initialized with packed type of {dtype}")
            return Constant(dtype, Shape(_value.shape), _value.flatten().tolist())
-        elif dtype in [Type.bf16]:
+        elif dtype in [Type.bf16, Type.f8e8m0, Type.f8e4m3, Type.f8e5m2]:
            display_shared_memory_warning(f"Constant initialized with OpenVINO custom {dtype}")
            return Constant(dtype, Shape(_value.shape), _value.flatten().tolist())
     # General use-case for all other types:
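Both branches touched above are reached through the public `opset13.constant` factory. A minimal, hedged sketch (assuming an OpenVINO build from this wheel, where the listed low-bit and f8 element types exist; the array contents are arbitrary):

```python
import numpy as np
from openvino.runtime import Type
from openvino.runtime import opset13 as ops

data = np.zeros((2, 4), dtype=np.float32)

# Packed sub-byte types (u1/i4/u4/nf4, plus f4e2m1 after this change) take the
# list-based Constant constructor and emit the shared-memory warning.
packed = ops.constant(data, dtype=Type.u4)

# bf16 (plus the f8 variants after this change) take the "OpenVINO custom type" branch.
custom = ops.constant(data, dtype=Type.bf16)
```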
intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py

@@ -7,4 +7,6 @@
 # TODO (ticket 138273): Add previous opset operators at the end of opset15 development
 from openvino.runtime.opset1.ops import parameter
 from openvino.runtime.opset15.ops import col2im
+from openvino.runtime.opset15.ops import embedding_bag_offsets
+from openvino.runtime.opset15.ops import embedding_bag_packed
 from openvino.runtime.opset15.ops import scatter_nd_update
intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py

@@ -4,9 +4,12 @@
 
 """Factory functions for ops added to openvino opset15."""
 from functools import partial
-from typing import
+from typing import List, Literal, Optional
 
+import numpy as np
 from openvino.runtime import Node, Type
+from openvino.runtime.opset1 import convert_like
+from openvino.runtime.opset14 import constant
 from openvino.runtime.opset_utils import _get_node_factory
 from openvino.runtime.utils.decorators import nameable_op
 from openvino.runtime.utils.types import NodeInput, as_nodes
@@ -83,3 +86,61 @@ def col2im(
             "pads_end": pads_end,
         },
     )
+
+
+@nameable_op
+def embedding_bag_offsets(
+    emb_table: NodeInput,
+    indices: NodeInput,
+    offsets: NodeInput,
+    default_index: Optional[NodeInput] = None,
+    per_sample_weights: Optional[NodeInput] = None,
+    reduction: Literal["sum", "mean"] = "sum",
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which performs sums or means of bags of embeddings without the intermediate embeddings.
+
+    :param emb_table: Tensor containing the embedding lookup table.
+    :param indices: 1D Tensor with indices.
+    :param offsets: 1D Tensor containing the starting index positions of each bag in indices.
+    :param per_sample_weights: Tensor with weights for each sample.
+    :param default_index: Scalar containing default index in embedding table to fill empty bags.
+                          If unset or set to -1, empty bags will be filled with 0.
+                          Reverse indexing using negative indices is not supported.
+    :param reduction: String to select algorithm used to perform reduction of elements in bag.
+    :param name: Optional name for output node.
+    :return: The new node performing EmbeddingBagOffsets operation.
+    """
+    inputs = [emb_table, indices, offsets]
+    if default_index is not None:
+        inputs.append(default_index)
+    elif per_sample_weights is not None:
+        inputs.append(convert_like(constant(np.array(-1, np.int32)), inputs[1]))
+    if per_sample_weights is not None:
+        inputs.append(per_sample_weights)
+
+    return _get_node_factory_opset15().create("EmbeddingBagOffsets", as_nodes(*inputs, name=name), {"reduction": reduction})
+
+
+@nameable_op
+def embedding_bag_packed(
+    emb_table: NodeInput,
+    indices: NodeInput,
+    per_sample_weights: Optional[NodeInput] = None,
+    reduction: Literal["sum", "mean"] = "sum",
+    name: Optional[str] = None,
+) -> Node:
+    """Return a node which performs sums or means of "bags" of embeddings, without the intermediate embeddings.
+
+    :param emb_table: Tensor containing the embedding lookup table.
+    :param indices: 2D Tensor of shape [batch, indices_per_bag] with indices.
+    :param per_sample_weights: Tensor of weights to be multiplied with embedding table with same shape as indices.
+    :param reduction: Operator to perform reduction of elements in bag.
+    :param name: Optional name for output node.
+    :return: The new node performing EmbeddingBagPacked operation.
+    """
+    inputs = [emb_table, indices]
+    if per_sample_weights is not None:
+        inputs.append(per_sample_weights)
+
+    return _get_node_factory_opset15().create("EmbeddingBagPacked", as_nodes(*inputs, name=name), {"reduction": reduction})
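For illustration, a hedged usage sketch of the two factories added above, using the opset15 namespace that now re-exports them (the table and index sizes are arbitrary):

```python
import numpy as np
from openvino.runtime import opset15 as ops

# A 10 x 4 embedding table, a flat index list, and per-bag start offsets.
emb_table = ops.parameter([10, 4], np.float32, name="emb_table")
indices = ops.parameter([6], np.int32, name="indices")
offsets = ops.parameter([3], np.int32, name="offsets")

bags = ops.embedding_bag_offsets(emb_table, indices, offsets, reduction="mean")
print(bags.get_type_name())  # EmbeddingBagOffsets

# Packed variant: indices are already grouped as [batch, indices_per_bag].
packed_idx = ops.parameter([3, 2], np.int32, name="packed_idx")
packed = ops.embedding_bag_packed(emb_table, packed_idx, reduction="sum")
```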
intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py

@@ -2,35 +2,26 @@
 # Copyright (C) 2018-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
+import numpy as np
+
 """Factory functions for all openvino ops."""
-from typing import
+from typing import Optional, Union
 
-import numpy as np
 from functools import partial, singledispatch
 
-from openvino.runtime import Node,
+from openvino.runtime import Node, Type, PartialShape, Output, Shape
 from openvino.runtime.op import assign, Constant, Parameter
+from openvino.runtime.op import read_value as _read_value
+from openvino.runtime.op.util import VariableInfo, Variable
 from openvino.runtime.opset_utils import _get_node_factory
-from openvino.runtime.utils.decorators import
-from openvino.runtime.utils.input_validation import (
-    assert_list_of_ints,
-    check_valid_attributes,
-    is_non_negative_value,
-    is_positive_value,
-)
-from openvino.runtime.utils.node_factory import NodeFactory
+from openvino.runtime.utils.decorators import nameable_op, overloading
 from openvino.runtime.utils.types import (
     NodeInput,
-    NumericData,
     NumericType,
-    ScalarData,
     TensorShape,
     as_node,
     as_nodes,
-    get_dtype,
     get_element_type,
-    get_element_type_str,
-    make_constant_node,
 )
 
 _get_node_factory_opset6 = partial(_get_node_factory, "opset6")
@@ -124,12 +115,12 @@ def mvn(
     return _get_node_factory_opset6().create("MVN", inputs, attributes)
 
 
-@
+@overloading(Union[Node, Output], str, Optional[Union[type, np.dtype, Type, str]], Optional[Union[TensorShape, Shape, PartialShape]], Optional[str])
 @nameable_op
-def read_value(init_value:
+def read_value(init_value: Union[Node, Output],
                variable_id: str,
-               variable_type: Optional[Union[
-               variable_shape: Optional[TensorShape] = None,
+               variable_type: Optional[Union[type, np.dtype, Type, str]] = None,
+               variable_shape: Optional[Union[TensorShape, Shape, PartialShape]] = None,
                name: Optional[str] = None) -> Node:
     """Return a node which produces the Assign operation.
 
@@ -140,29 +131,28 @@ def read_value(init_value: NodeInput,
     :param name: Optional name for output node.
     :return: ReadValue node
    """
-
+    info = VariableInfo()
+    info.variable_id = variable_id
 
     if variable_type is not None:
         if not isinstance(variable_type, Type) and not isinstance(variable_type, str):
-
+            info.data_type = get_element_type(variable_type)
         else:
-
+            info.data_type = variable_type
 
     if variable_shape is not None:
-
+        info.data_shape = PartialShape(variable_shape)
 
-
-
-        [as_node(init_value, name=name)],
-        attr_map,
-    )
+    var_from_info = Variable(info)
+    return _read_value(new_value=as_node(init_value, name=name), variable=var_from_info)
 
 
-@
-
-
-
-
+@overloading(str, Optional[Union[type, np.dtype, Type, str]], Optional[Union[TensorShape, Shape, PartialShape]], Optional[str])  # type: ignore
+@nameable_op
+def read_value(variable_id: str,  # noqa: F811
+               variable_type: Optional[Union[type, np.dtype, Type, str]] = None,
+               variable_shape: Optional[Union[TensorShape, Shape, PartialShape]] = None,
+               name: Optional[str] = None) -> Node:
     """Return a node which produces the Assign operation.
 
     :param variable_id: Id of a variable to be read.
@@ -171,19 +161,46 @@ def _(variable_id: str,
     :param name: Optional name for output node.
     :return: ReadValue node
     """
-
+    info = VariableInfo()
+    info.variable_id = variable_id
 
     if variable_type is not None:
         if not isinstance(variable_type, Type) and not isinstance(variable_type, str):
-
+            info.data_type = get_element_type(variable_type)
         else:
-
+            info.data_type = variable_type
 
     if variable_shape is not None:
-
+        info.data_shape = PartialShape(variable_shape)
+
+    var_from_info = Variable(info)
+
+    return _read_value(var_from_info)
+
+
+@overloading(Variable, Optional[str])  # type: ignore
+@nameable_op
+def read_value(ov_variable: Variable,  # noqa: F811
+               name: Optional[str] = None) -> Node:
+    """Return a node which produces the Assign operation.
 
-
-
-
-
-)
+    :param ov_variable: Variable to be read.
+    :param name: Optional name for output node.
+    :return: ReadValue node
+    """
+    return _read_value(ov_variable)
+
+
+@overloading(Union[Node, Output], Variable, Optional[str])  # type: ignore
+@nameable_op
+def read_value(init_value: Union[Node, Output],  # noqa: F811
+               ov_variable: Variable,
+               name: Optional[str] = None) -> Node:
+    """Return a node which produces the Assign operation.
+
+    :param init_value: Optional node producing a value to be returned instead of an unassigned variable.
+    :param ov_variable: Variable to be read.
+    :param name: Optional name for output node.
+    :return: ReadValue node
+    """
+    return _read_value(as_node(init_value, name=name), ov_variable)
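The dispatch added above is keyed on the classes of the positional arguments (the `MultiMethod.__call__` shown further down in `decorators.py` accepts `*args` only), so a hedged sketch of the new `read_value` overloads uses positional calls; the variable ids and shapes below are arbitrary:

```python
from openvino.runtime import PartialShape, Type
from openvino.runtime import opset6 as ops
from openvino.runtime.op.util import Variable, VariableInfo

# Overload: build the ReadValue from a variable id plus explicit type and shape.
state_a = ops.read_value("state_a", Type.f32, PartialShape([1, 3]))

# Overload: build it from a pre-constructed Variable object.
info = VariableInfo()
info.variable_id = "state_b"
info.data_type = Type.f32
info.data_shape = PartialShape([1, 3])
state_b = ops.read_value(Variable(info))
```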
intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py

@@ -336,6 +336,7 @@ def random_uniform(
     output_type: str,
     global_seed: int = 0,
     op_seed: int = 0,
+    alignment: str = "tensorflow",
     name: Optional[str] = None,
 ) -> Node:
     """Return a node which generates sequence of random values from uniform distribution.
@@ -347,6 +348,8 @@ def random_uniform(
                         'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'.
     :param global_seed: Specifies global seed value. Required to be a positive integer or 0.
     :param op_seed: Specifies operational seed value. Required to be a positive integer or 0.
+    :param alignment: Specifies alignment of the randomly generated numbers to a given framework.
+                      Possible values: 'tensorflow', 'pytorch'. Default is 'tensorflow'.
     :param name: Optional output node name.
 
     :return: The new node which performs generation of random values from uniform distribution.
@@ -363,6 +366,7 @@ def random_uniform(
         "output_type": output_type,
         "global_seed": global_seed,
         "op_seed": op_seed,
+        "alignment": alignment.lower()
     }
     return _get_node_factory_opset8().create("RandomUniform", inputs, attributes)
 
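A hedged sketch of the new `alignment` attribute on `opset8.random_uniform` (the shape constants and seed values are arbitrary):

```python
import numpy as np
from openvino.runtime import opset8 as ops

output_shape = ops.constant(np.array([2, 3], dtype=np.int64))
min_val = ops.constant(np.array(0.0, dtype=np.float32))
max_val = ops.constant(np.array(1.0, dtype=np.float32))

# "tensorflow" (the default) keeps the previous behaviour; "pytorch" aligns the
# generated sequence with torch-style RNG.
node = ops.random_uniform(output_shape, min_val, max_val, "f32",
                          global_seed=1, op_seed=2, alignment="pytorch")
```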
intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py

@@ -4,7 +4,7 @@
 
 from functools import wraps
 from inspect import getfullargspec
-from typing import Any, Callable,
+from typing import Any, Callable, Dict, Optional, Union, get_origin, get_args
 
 from openvino.runtime import Node, Output
 from openvino.runtime.utils.types import NodeInput, as_node, as_nodes
@@ -68,3 +68,69 @@ def custom_preprocess_function(custom_function: Callable) -> Callable:
         return Output._from_node(custom_function(node))
 
     return wrapper
+
+
+class MultiMethod(object):
+    def __init__(self, name: str):
+        self.name = name
+        self.typemap: Dict[tuple, Callable] = {}
+
+    # Checks if actual_type is a subclass of any type in the union
+    def matches_union(self, union_type, actual_type) -> bool:  # type: ignore
+        for type_arg in get_args(union_type):
+            if isinstance(type_arg, type) and issubclass(actual_type, type_arg):
+                return True
+            elif get_origin(type_arg) == list:
+                if issubclass(actual_type, list):
+                    return True
+        return False
+
+    def matches_optional(self, optional_type, actual_type) -> bool:  # type: ignore
+        return actual_type is None or self.matches_union(optional_type, actual_type)
+
+    # Checks whether there is overloading which matches invoked argument types
+    def check_invoked_types_in_overloaded_funcs(self, tuple_to_check: tuple, key_structure: tuple) -> bool:
+        for actual_type, expected_type in zip(tuple_to_check, key_structure):
+            origin = get_origin(expected_type)
+            if origin is Union:
+                if not self.matches_union(expected_type, actual_type):
+                    return False
+            elif origin is Optional:
+                if not self.matches_optional(expected_type, actual_type):
+                    return False
+            elif not issubclass(actual_type, expected_type):
+                return False
+        return True
+
+    def __call__(self, *args) -> Any:  # type: ignore
+        types = tuple(arg.__class__ for arg in args)
+        key_matched = None
+        for key in self.typemap.keys():
+            if self.check_invoked_types_in_overloaded_funcs(types, key):
+                key_matched = key
+                break
+
+        if key_matched is None:
+            raise TypeError("no match")
+
+        function = self.typemap.get(key_matched)
+        return function(*args)  # type: ignore
+
+    def register(self, types: tuple, function: Callable) -> None:
+        if types in self.typemap:
+            raise TypeError("duplicate registration")
+        self.typemap[types] = function
+
+
+registry: Dict[str, MultiMethod] = {}
+
+
+def overloading(*types: tuple) -> Callable:
+    def register(function: Callable) -> MultiMethod:
+        name = function.__name__
+        mm = registry.get(name)
+        if mm is None:
+            mm = registry[name] = MultiMethod(name)
+        mm.register(types, function)
+        return mm
+    return register
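The `overloading` helper added above keeps a module-level registry of `MultiMethod` objects keyed by function name and dispatches on the classes of the positional arguments. A small, self-contained sketch of how it behaves (the `describe` function and its labels are purely illustrative):

```python
from typing import Optional
from openvino.runtime.utils.decorators import overloading

@overloading(int, Optional[str])
def describe(value, label=None):
    return f"int: {value}"

@overloading(str, Optional[str])  # noqa: F811
def describe(value, label=None):
    return f"str: {value}"

print(describe(42))     # matches the (int, Optional[str]) signature -> "int: 42"
print(describe("npu"))  # matches the (str, Optional[str]) signature -> "str: npu"
# Note: dispatch inspects positional arguments only; keyword-only calls are not routed.
```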
intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py

@@ -266,7 +266,7 @@ def get_numpy_tensors(numpy_paths: List[str], info: AppInputInfo, batch_sizes: L
         else:
             try:
                 if info.layout.has_name("N"):
-                    numpy_arrays[[None] * info.layout.get_index_by_name("N") + [b]] = numpy_arr
+                    numpy_arrays[[None] * info.layout.get_index_by_name("N") + [b]] = numpy_arr[b]
                 else:
                     numpy_arrays = numpy_arr
             except ValueError:
@@ -291,7 +291,7 @@ def get_binary_tensors(binary_paths: List[str], info: AppInputInfo, batch_sizes:
     tensors = []
     for i in range(niter):
         shape_id = i % num_shapes
-        dtype = get_dtype(info.element_type)
+        dtype = np.uint8() if info.element_type.bitwidth < 8 else get_dtype(info.element_type)
         shape = list(info.shapes[shape_id])
         binaries = np.ndarray(shape=shape, dtype=dtype)
         binary_index = processed_frames
@@ -301,14 +301,14 @@ def get_binary_tensors(binary_paths: List[str], info: AppInputInfo, batch_sizes:
             binary_filename: str = binary_paths[binary_index]
             extension = binary_filename.lower().split('.')[-1]
             if extension == "bin":
-
-
-                if
+                binary_file_bit_size = os.path.getsize(binary_filename) * 8
+                blob_bit_size = info.element_type.bitwidth * int(np.prod(shape))
+                if blob_bit_size != binary_file_bit_size:
                     raise Exception(
-                        f"File {binary_filename} contains {
-                from_file = np.
+                        f"File {binary_filename} contains {binary_file_bit_size} bites but model expects {blob_bit_size}")
+                from_file = np.fromfile(binary_filename, dtype)
                 if info.layout.has_name("N"):
-                    binaries[[None] * info.layout.get_index_by_name("N") + [b]] = from_file
+                    binaries[[None] * info.layout.get_index_by_name("N") + [b]] = from_file[b]
                 else:
                     binaries = from_file
             else:
@@ -317,7 +317,7 @@ def get_binary_tensors(binary_paths: List[str], info: AppInputInfo, batch_sizes:
 
             binary_index += 1
         processed_frames += current_batch_size
-        tensors.append(Tensor(binaries))
+        tensors.append(Tensor(binaries, shape, info.element_type))
     return tensors
 
 
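The benchmark change above reads sub-byte inputs as raw uint8 and rebuilds the tensor with the real shape and element type. The same constructor form used in the diff can be exercised directly; a hedged sketch (buffer sizes are illustrative):

```python
import numpy as np
from openvino.runtime import Tensor, Type

# 16 u4 values pack into 8 bytes; Tensor reinterprets the byte buffer with the
# requested shape and element type, as get_binary_tensors now does.
shape = [1, 16]
raw_bytes = np.zeros(8, dtype=np.uint8)
t = Tensor(raw_bytes, shape, Type.u4)
print(t.get_shape(), t.get_element_type())
```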
intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py

@@ -36,6 +36,10 @@ from openvino.tools.ovc.telemetry_utils import send_params_info, send_conversion
     init_mo_telemetry
 from openvino.tools.ovc.moc_frontend.pytorch_frontend_utils import get_pytorch_decoder, extract_input_info_from_example
 from openvino.tools.ovc.moc_frontend.paddle_frontend_utils import paddle_frontend_converter
+try:
+    from openvino.tools.ovc.moc_frontend.jax_frontend_utils import get_jax_decoder
+except:
+    get_jax_decoder = None
 
 # pylint: disable=no-name-in-module,import-error
 from openvino.frontend import FrontEndManager, OpConversionFailure, TelemetryExtension
@@ -228,6 +232,11 @@ def check_model_object(argv):
                 paddle.fluid.dygraph.layers.Layer) or isinstance(
             model, paddle.fluid.executor.Executor):
         return "paddle"
+
+    if 'jax' in sys.modules:
+        import jax
+        if isinstance(model, (jax.core.Jaxpr, jax.core.ClosedJaxpr)):
+            return "jax"
 
     raise Error('Unknown model type: {}'.format(type(model)))
 
@@ -319,6 +328,7 @@ def normalize_inputs(argv: argparse.Namespace):
     """
     # Parse input to list of InputCutInfo
     inputs = input_to_input_cut_info(argv.input)
+    argv.input = inputs
 
     # Make list of input names
     input_names_list = []
@@ -329,8 +339,6 @@ def normalize_inputs(argv: argparse.Namespace):
     assert len(input_names_list) == len(inputs), "\"input\" parameter has unnamed inputs and named inputs. " \
                                                  "Please either set names for all inputs, " \
                                                  "or do not set names for all inputs."
-    argv.inputs_list = input_names_list
-    argv.input = ','.join(input_names_list)
 
     if len(input_names_list) > 0:
         # Named inputs case
@@ -462,6 +470,12 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used):
                                                            outputs)
         pdmodel = paddle_runtime_converter.convert_paddle_to_pdmodel()
         args['input_model'] = pdmodel
+    if model_framework == "jax":
+        if get_jax_decoder is not None:
+            get_jax_decoder(args['input_model'], args)
+        else:
+            raise Error("JAX Frontend is not available.")
+
 
     argv = pack_params_to_args_namespace(args, cli_parser, python_api_used)
     argv.framework = model_framework
intel_npu_acceleration_library/external/openvino/tools/ovc/main.py

@@ -10,6 +10,11 @@ except ImportError:
     import openvino.tools.ovc.telemetry_stub as tm
 from openvino.tools.ovc.convert_impl import _convert
 from openvino.tools.ovc.cli_parser import get_model_name_from_args
+from openvino.tools.ovc.utils import import_openvino_tokenizers
+
+# TODO 131000: temporal workaround to patch OpenVINO Core and frontends with tokenizers extensions
+# make OVC tool to convert models requiring openvino-tokenizers extensions
+import_openvino_tokenizers()
 
 # pylint: disable=no-name-in-module,import-error
 from openvino.runtime import save_model
intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py (new file)

@@ -0,0 +1,19 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import logging as log
+
+
+def get_jax_decoder(model, args):
+    try:
+        from openvino.frontend.jax.jaxpr_decoder import JaxprPythonDecoder
+    except Exception as e:
+        log.error("JAX frontend loading failed")
+        raise e
+
+    if not isinstance(model, JaxprPythonDecoder):
+        decoder = JaxprPythonDecoder(model)
+    else:
+        decoder = model
+
+    args['input_model'] = decoder
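Taken together with the `convert_impl.py` changes above, this wires jaxpr objects into the conversion entry point. A hedged end-to-end sketch (assuming a JAX install that this frontend supports; the function being traced is arbitrary):

```python
import jax
import jax.numpy as jnp
import openvino as ov

def fn(x):
    return jnp.sin(x) + 1.0

# check_model_object() recognizes jax.core.ClosedJaxpr, and get_jax_decoder()
# wraps it in JaxprPythonDecoder before handing it to the JAX frontend.
jaxpr = jax.make_jaxpr(fn)(jnp.ones((2, 3), dtype=jnp.float32))
ov_model = ov.convert_model(jaxpr)
```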
intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py

@@ -72,12 +72,14 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
     outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name())
     input_model.override_all_outputs([x['node'] for x in outputs])
     '''
-    argv.placeholder_shapes, argv.placeholder_data_types = convert_params_lists_to_dicts(
-        input_model, argv.placeholder_shapes, argv.placeholder_data_types)
 
-
-
-
+    enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms()
+    if 'ANALYSIS_JSON_PRINT' in enabled_transforms:
+        # NOTE that model analysis is performed before applying user's settings (inputs's shapes etc.)
+        framework_model = moc_front_end.decode(input_model)
+        json_model_analysis_dump(framework_model)
+        # a model is not processed further in json analysis mode
+        sys.exit(0)
 
     def check_places_are_same(places_original: List[Place], places_new: List[Place]):
         """
@@ -90,6 +92,67 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
             [item for item in places_original if any(
                 [item.is_equal(item2['node']) for item2 in places_new])]) == len(places_original)
 
+    if getattr(argv, "framework", None) == "pytorch":
+        iplaces = []
+        for idx, input_info in enumerate(argv.input):
+            if getattr(input_info, "name", None):
+                place = input_model.get_place_by_tensor_name(input_info.name)
+                if not input_info.shape and not input_info.type:
+                    # If we received place by name, we need to use it for FE to verify
+                    # that such name exist, otherwise we silently ignore it.
+                    # Using dynamic shape should be safe, because FE will not overwrite
+                    # the shape that was produced after conversion, but merge it, so
+                    # dynamic shape will not change anything.
+                    input_model.set_partial_shape(place, PartialShape.dynamic())
+            else:
+                place = input_model.get_place_by_input_index(idx)
+            iplaces.append(place)
+            if input_info.shape is not None:
+                input_model.set_partial_shape(place, input_info.shape)
+            if input_info.type is not None:
+                input_model.set_element_type(place, input_info.type)
+        model_inputs = input_model.get_inputs()
+        def merge_inputs(inputs, to_set_list):
+            # use input places instead of obtained by index if they are the same
+            res = []
+            for p in to_set_list:
+                found = False
+                for i in inputs:
+                    if p.is_equal(i):
+                        res.append(i)
+                        found = True
+                        break
+                if not found:
+                    res.append(p)
+            return res
+        iplaces = merge_inputs(model_inputs, iplaces)
+        # Currently this only work to reorder inputs/outputs
+        to_override_all_inputs = check_places_are_same(model_inputs, [{"node": p} for p in iplaces])
+        to_override_all_outputs = False
+        if argv.output:
+            oplaces = []
+            _outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name())
+            for out_desc in _outputs:
+                oplaces.append(out_desc["name"])
+            model_outputs = input_model.get_outputs()
+            to_override_all_outputs = check_places_are_same(model_outputs, [{"node": p} for p in oplaces])
+        if to_override_all_inputs and to_override_all_outputs:
+            input_model.extract_subgraph(iplaces, oplaces)
+        elif to_override_all_inputs:
+            input_model.override_all_inputs(iplaces)
+        elif to_override_all_outputs:
+            input_model.override_all_outputs(oplaces)
+
+        ov_model = moc_front_end.convert(input_model)
+        return ov_model
+
+    argv.placeholder_shapes, argv.placeholder_data_types = convert_params_lists_to_dicts(
+        input_model, argv.placeholder_shapes, argv.placeholder_data_types)
+
+    user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
+        input_model, argv.placeholder_shapes, argv.placeholder_data_types,
+        argv.output, {}, moc_front_end.get_name())
+
     def add_names_to_tensors(model: InputModel, places: List[Place]):
         """
         Adds additional names to some model input tensors. This helper should be used
@@ -107,14 +170,6 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
             log.warning('Could not add an additional name to a tensor pointed to by \'{}\'. Details: {}'.format(
                 new_input['input_name'], str(e)))
 
-    enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms()
-    if 'ANALYSIS_JSON_PRINT' in enabled_transforms:
-        # NOTE that model analysis is performed before applying user's settings (inputs's shapes etc.)
-        framework_model = moc_front_end.decode(input_model)
-        json_model_analysis_dump(framework_model)
-        # a model is not processed further in json analysis mode
-        sys.exit(0)
-
     model_inputs = input_model.get_inputs()
     inputs_equal = True
     if user_shapes:
@@ -238,9 +293,6 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
 
         input_model.set_tensor_value(place, value)
 
-    def shape_to_array(shape: PartialShape):
-        return [shape.get_dimension(i) for i in range(shape.rank.get_length())]
-
     ov_model = moc_front_end.convert(input_model)
 
     return ov_model