bigdl_core_npu-2.5.0-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bigdl_core_npu-2.5.0.dist-info/METADATA +35 -0
- bigdl_core_npu-2.5.0.dist-info/RECORD +223 -0
- bigdl_core_npu-2.5.0.dist-info/WHEEL +5 -0
- bigdl_core_npu-2.5.0.dist-info/top_level.txt +1 -0
- intel_npu_acceleration_library/__init__.py +24 -0
- intel_npu_acceleration_library/_version.py +6 -0
- intel_npu_acceleration_library/backend/__init__.py +37 -0
- intel_npu_acceleration_library/backend/base.py +215 -0
- intel_npu_acceleration_library/backend/bindings.py +279 -0
- intel_npu_acceleration_library/backend/compression.py +24 -0
- intel_npu_acceleration_library/backend/convolution.py +58 -0
- intel_npu_acceleration_library/backend/factory.py +944 -0
- intel_npu_acceleration_library/backend/linear.py +60 -0
- intel_npu_acceleration_library/backend/matmul.py +59 -0
- intel_npu_acceleration_library/backend/mlp.py +58 -0
- intel_npu_acceleration_library/backend/ops.py +141 -0
- intel_npu_acceleration_library/backend/qlinear.py +71 -0
- intel_npu_acceleration_library/backend/qmatmul.py +66 -0
- intel_npu_acceleration_library/backend/runtime.py +210 -0
- intel_npu_acceleration_library/backend/sdpa.py +107 -0
- intel_npu_acceleration_library/backend/tensor.py +1050 -0
- intel_npu_acceleration_library/backend/utils.py +70 -0
- intel_npu_acceleration_library/compiler.py +194 -0
- intel_npu_acceleration_library/device.py +230 -0
- intel_npu_acceleration_library/dtypes.py +122 -0
- intel_npu_acceleration_library/external/openvino/__init__.py +71 -0
- intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +20 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
- intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +352 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +139 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +98 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +119 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +289 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +118 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +536 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +256 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +460 -0
- intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
- intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
- intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
- intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +26 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +4 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
- intel_npu_acceleration_library/external/openvino/properties/__init__.py +21 -0
- intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
- intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
- intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
- intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
- intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
- intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +18 -0
- intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3067 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +399 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +10 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +85 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +189 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +783 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +38 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +429 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +70 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
- intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +536 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +35 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +246 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +205 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +109 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
- intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
- intel_npu_acceleration_library/external/openvino/utils.py +98 -0
- intel_npu_acceleration_library/functional/__init__.py +8 -0
- intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
- intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
- intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
- intel_npu_acceleration_library/modelling.py +150 -0
- intel_npu_acceleration_library/nn/__init__.py +20 -0
- intel_npu_acceleration_library/nn/autograd.py +68 -0
- intel_npu_acceleration_library/nn/conv.py +257 -0
- intel_npu_acceleration_library/nn/functional.py +1207 -0
- intel_npu_acceleration_library/nn/linear.py +162 -0
- intel_npu_acceleration_library/nn/llm.py +417 -0
- intel_npu_acceleration_library/nn/module.py +393 -0
- intel_npu_acceleration_library/optimizations.py +157 -0
- intel_npu_acceleration_library/quantization.py +174 -0
intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py
ADDED
@@ -0,0 +1,205 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import logging as log
+import sys
+
+import numpy as np
+# pylint: disable=no-name-in-module,import-error
+from openvino.runtime import Tensor, PartialShape
+from openvino.tools.ovc.error import Error
+
+
+
+def get_pytorch_decoder(model, example_inputs, args):
+    try:
+        from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder
+        from openvino.frontend.pytorch.fx_decoder import TorchFXPythonDecoder
+        from openvino.frontend.pytorch.module_extension import ModuleExtension
+        import torch
+    except Exception as e:
+        log.error("PyTorch frontend loading failed")
+        raise e
+
+    def extract_module_extensions(args):
+        extensions = args.get('extension', []) or []
+        if not isinstance(extensions, (list, tuple)):
+            extensions = [extensions]
+        return {extension.module: extension for extension in extensions if isinstance(extension, ModuleExtension)}
+
+    if 'nncf' in sys.modules:
+        is_good_version = True
+        try:
+            from nncf.torch.nncf_network import NNCFNetwork
+
+            if isinstance(model, NNCFNetwork):
+                from packaging import version
+                if version.parse(sys.modules['nncf'].__version__) < version.parse("2.6"):
+                    is_good_version = False
+        except:
+            pass
+        if not is_good_version:
+            raise RuntimeError(
+                "NNCF models produced by nncf<2.6 are not supported directly. Please upgrade nncf or export to ONNX first.")
+    inputs = prepare_torch_inputs(example_inputs)
+    if not isinstance(model, (TorchScriptPythonDecoder, TorchFXPythonDecoder)):
+        if hasattr(torch, "export") and isinstance(model, (torch.export.ExportedProgram)):
+            from packaging import version
+            if version.parse(torch.__version__) >= version.parse("2.2"):
+                model = model.run_decompositions()
+            gm = model.module()
+            decoder = TorchFXPythonDecoder(gm)
+        else:
+            decoder = TorchScriptPythonDecoder(
+                model,
+                example_input=inputs,
+                shared_memory=args.get("share_weights", True),
+                module_extensions=extract_module_extensions(args))
+    else:
+        decoder = model
+    args['input_model'] = decoder
+    args["example_input"] = inputs
+
+    return args
+
+
+def update_list_or_dict(container, name, idx, value):
+    if isinstance(container, dict):
+        if name is None:
+            name = list(container)[idx]
+        container[name] = value
+        return
+    if idx == len(container):
+        container.append(value)
+    elif idx > len(container):
+        raise Error(f"Wrong {idx}")
+    else:
+        container[idx] = value
+    return
+
+
+def get_value_from_list_or_dict(container, name, idx):
+    if isinstance(container, dict):
+        if name is None:
+            if idx < len(container):
+                name = list(container)[idx]
+            return None
+        return container.get(name)
+    if idx < len(container):
+        return container[idx]
+    return None
+
+
+def extract_input_info_from_example(args, inputs):
+    try:
+        from openvino.frontend.pytorch.utils import pt_to_ov_type_map  # pylint: disable=no-name-in-module,import-error
+    except Exception as e:
+        log.error("PyTorch frontend loading failed")
+        raise e
+    example_inputs = args.example_input
+    data_types = args.placeholder_data_types or {}
+    input_shapes = args.placeholder_shapes or {}
+    is_dict_input = isinstance(example_inputs, dict)
+    list_inputs = list(example_inputs.values()) if is_dict_input else example_inputs
+    input_names = None
+    if not isinstance(example_inputs, (list, tuple, dict)):
+        list_inputs = [list_inputs]
+    if args.input_model._input_is_list:
+        list_inputs[0] = list_inputs[0].unsqueeze(0)
+    if args.input_model._input_signature is not None and not is_dict_input:
+        input_names = args.input_model._input_signature[1:] if args.input_model._input_signature[
+            0] == "self" else args.input_model._input_signature
+        if not is_dict_input:
+            example_inputs = dict(zip(input_names, list_inputs))
+            is_dict_input = True
+    elif is_dict_input:
+        input_names = list(example_inputs)
+    if not data_types and input_names is None:
+        data_types = []
+    if not input_shapes and input_names is None:
+        input_shapes = []
+    if inputs:
+        for input_id, input_info in enumerate(inputs):
+            input_name = input_info.name
+            if is_dict_input and input_name in example_inputs:
+                example_input = example_inputs[input_name]
+            else:
+                example_input = list_inputs[input_id]
+                if is_dict_input and input_name is None:
+                    input_name = input_names[input_id]
+            dtype = getattr(example_input, "dtype", type(example_input))
+            example_dtype = pt_to_ov_type_map.get(str(dtype))
+            user_dtype = get_value_from_list_or_dict(data_types, input_name, input_id)
+            if user_dtype is not None and example_dtype is not None and example_dtype != user_dtype:
+                raise Error(
+                    f"Defined input type {user_dtype} is not equal to provided example_input type {example_dtype}")
+
+            data_rank = getattr(example_input, "ndim", 0)
+            user_input_shape = get_value_from_list_or_dict(input_shapes, input_name, input_id)
+            if user_input_shape.rank.is_static and user_input_shape.rank.get_length() != data_rank:
+                raise Error(
+                    f"Requested input shape {user_input_shape.rank.get_length()} rank"
+                    f" is not equal to provided example_input rank {data_rank}")
+
+            input_shape = user_input_shape if user_input_shape is not None else PartialShape([-1] * data_rank)
+            update_list_or_dict(data_types, input_name, input_id,
+                                example_dtype if example_dtype is not None else None)
+            update_list_or_dict(input_shapes, input_name, input_id, input_shape)
+    else:
+        for input_id, example_input in enumerate(list_inputs):
+            dtype = getattr(example_input, "dtype", type(example_input))
+            ov_dtype = pt_to_ov_type_map.get(str(dtype))
+            data_rank = getattr(example_input, "ndim", 0)
+            input_shape = PartialShape([-1] * data_rank)
+            input_name = input_names[input_id] if input_names else None
+            update_list_or_dict(input_shapes, input_name, input_id, input_shape)
+            update_list_or_dict(data_types, input_name, input_id, ov_dtype if ov_dtype is not None else None)
+
+    args.placeholder_data_types = data_types
+    args.placeholder_shapes = input_shapes
+    if not args.input and input_names:
+        args.input_list = input_names
+        args.input = ",".join(input_names)
+
+
+# pylint: disable=no-member
+def to_torch_tensor(tensor):
+    import torch  # pylint: disable=import-error
+    if isinstance(tensor, torch.Tensor):
+        return tensor
+    if isinstance(tensor, np.ndarray):
+        return torch.tensor(tensor)
+    if isinstance(tensor, Tensor):
+        return torch.tensor(tensor.data)
+    if isinstance(tensor, (float, int, bool)):
+        return tensor
+    if isinstance(tensor, (tuple, list)):
+        # TODO: Function to_torch_tensor should be renamed as it handles not only a tensor
+        return tuple(to_torch_tensor(x) for x in tensor)
+    if isinstance(tensor, dict) and all(isinstance(k, str) for k in tensor.keys()):
+        return dict((k, to_torch_tensor(x)) for k, x in tensor.items())
+    else:
+        raise Error("Unexpected type of example_input. Supported types torch.Tensor, np.array or ov.Tensor. "
+                    "Got {}".format(type(tensor)))
+
+
+def prepare_torch_inputs(example_inputs):
+    inputs = None
+    if example_inputs is not None:
+        inputs = example_inputs
+        if isinstance(inputs, list):
+            inputs = [to_torch_tensor(x) for x in inputs]
+        elif isinstance(inputs, tuple):
+            inputs = [to_torch_tensor(x) for x in inputs]
+            inputs = tuple(inputs)
+        elif isinstance(inputs, dict):
+            for name, tensor in inputs.items():
+                assert isinstance(name, str), "Expected dictionary where keys are input names of string type and" \
+                                              " values are tensors. Got key of type {}".format(type(name))
+                inputs[name] = to_torch_tensor(tensor)
+        else:
+            inputs = to_torch_tensor(inputs)
+    else:
+        # No example_input were provided, decoder will use scripting
+        return None
+    return inputs
intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py
ADDED
@@ -0,0 +1,109 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+
+import numpy as np
+from openvino.runtime import Shape, PartialShape, Dimension  # pylint: disable=no-name-in-module,import-error
+from openvino.tools.ovc.error import Error
+
+
+def get_static_shape(shape: [PartialShape, list, tuple], dynamic_value=None):
+    # Current function returns list with static dimensions with following logic.
+    # For dynamic dimensions return lower boundaries if they are set, otherwise
+    # return upper boundaries if they are set. If dimension is fully dynamic then raise error.
+    shape_list = []
+    for idx, dim in enumerate(shape):
+        if isinstance(dim, int):
+            if dim == -1:
+                shape_list.append(dynamic_value)
+                continue
+            shape_list.append(dim)
+        elif isinstance(dim, np.int64):
+            if dim == np.int64(-1):
+                shape_list.append(dynamic_value)
+                continue
+            shape_list.append(dim)
+        elif isinstance(dim, tuple):
+            # tuple where (min_length, max_length), the format which uses MO cli parser
+            assert len(dim) == 2, "Unknown dimension type {}".format(dim)
+            if dim[0] > 0:
+                shape_list.append(dim[0])
+            elif dim[1] < np.iinfo(np.int64).max:
+                shape_list.append(dim[1])
+            else:
+                shape_list.append(dynamic_value)
+                continue
+        elif isinstance(dim, Dimension):
+            if dim.is_static or dim.get_min_length() > 0:
+                shape_list.append(dim.get_min_length())
+            elif dim.get_max_length() != -1:
+                shape_list.append(dim.get_max_length())
+            else:
+                shape_list.append(dynamic_value)
+                continue
+        else:
+            raise Error("Unknown dimension type {}".format(dim))
+
+    return tuple(shape_list)
+
+
+def get_dynamic_dims(shape: [PartialShape, list, tuple]):
+    dynamic_dims = []
+    for idx, dim in enumerate(shape):
+        if isinstance(dim, int):
+            if dim == -1:
+                dynamic_dims.append(idx)
+        if isinstance(dim, np.int64):
+            if dim == np.int64(-1):
+                dynamic_dims.append(idx)
+        elif isinstance(dim, tuple):
+            dynamic_dims.append(idx)
+        elif isinstance(dim, Dimension):
+            if dim.get_min_length() == 0 and dim.get_max_length() == -1:
+                dynamic_dims.append(idx)
+
+    return dynamic_dims
+
+
+def tensor_to_int_list(tensor):
+    assert hasattr(tensor, 'numpy'), "Could not get value of provided tensor: {}".format(tensor)
+    tensor_numpy = tensor.numpy()
+    assert tensor_numpy.dtype == np.int32, "Unexpected type of provided tensor. Expected int32, got: {}".format(
+        tensor_numpy.dtype)
+    return tensor_numpy.tolist()
+
+
+def to_partial_shape(shape):
+    if 'tensorflow' in sys.modules:
+        import tensorflow as tf  # pylint: disable=import-error
+        if isinstance(shape, tf.Tensor):
+            return PartialShape(tensor_to_int_list(shape))
+        if isinstance(shape, tf.TensorShape):
+            return PartialShape(list(shape))
+    if 'paddle' in sys.modules:
+        import paddle
+        if isinstance(shape, paddle.Tensor):
+            return PartialShape(tensor_to_int_list(shape))
+    return PartialShape(shape)
+
+
+def is_shape_type(value):
+    if isinstance(value, PartialShape):
+        return True
+    if 'tensorflow' in sys.modules:
+        import tensorflow as tf  # pylint: disable=import-error
+        if isinstance(value, (tf.TensorShape, tf.Tensor)):
+            return True
+    if 'paddle' in sys.modules:
+        import paddle
+        if isinstance(value, paddle.Tensor):
+            return True
+    if isinstance(value, Shape):
+        return True
+    if isinstance(value, list) or isinstance(value, tuple):
+        for dim in value:
+            if not (isinstance(dim, Dimension) or isinstance(dim, int)):
+                return False
+        return True
+    return False
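
A short sketch of how the shape helpers above behave on a partially dynamic shape (not part of the diff; assumes the bundled openvino package is importable):

from openvino.runtime import PartialShape
from openvino.tools.ovc.moc_frontend.shape_utils import get_static_shape, get_dynamic_dims

shape = PartialShape([-1, 3, 224, 224])
# Fully dynamic dimensions are replaced by the supplied fallback value.
print(get_static_shape(shape, dynamic_value=1))  # (1, 3, 224, 224)
# Indices of the fully dynamic dimensions.
print(get_dynamic_dims(shape))                   # [0]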
intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py
ADDED
@@ -0,0 +1,82 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+
+import numpy as np
+
+import openvino as ov
+from openvino.runtime import Type
+
+
+def is_type(val):
+    if isinstance(val, (type, Type)):
+        return True
+    if 'tensorflow' in sys.modules:
+        import tensorflow as tf  # pylint: disable=import-error
+        if isinstance(val, tf.dtypes.DType):
+            return True
+    if 'torch' in sys.modules:
+        import torch
+        if isinstance(val, torch.dtype):
+            return True
+    if 'paddle' in sys.modules:
+        import paddle
+        if isinstance(val, paddle.dtype):
+            return True
+    return False
+
+
+def to_ov_type(val):
+    if isinstance(val, Type):
+        return val
+    if isinstance(val, (type, str, np.dtype)):
+        return Type(val)
+    if 'tensorflow' in sys.modules:
+        import tensorflow as tf  # pylint: disable=import-error
+        if isinstance(val, tf.dtypes.DType):
+            from openvino.frontend.tensorflow.utils import tf_type_to_ov_type  # pylint: disable=no-name-in-module,import-error
+            return tf_type_to_ov_type(val)
+    if 'torch' in sys.modules:
+        import torch
+
+        if isinstance(val, torch.dtype):
+            torch_to_ov_type = {
+                torch.float32: ov.Type.f32,
+                torch.float16: ov.Type.f16,
+                torch.float64: ov.Type.f64,
+                torch.bfloat16: ov.Type.bf16,
+                torch.uint8: ov.Type.u8,
+                torch.int8: ov.Type.i8,
+                torch.int16: ov.Type.i16,
+                torch.int32: ov.Type.i32,
+                torch.int64: ov.Type.i64,
+                torch.bool: ov.Type.boolean,
+            }
+            if val not in torch_to_ov_type:
+                raise Exception("The provided data time is not supported {}.".format(val))
+
+            return torch_to_ov_type[val]
+
+    if 'paddle' in sys.modules:
+        import paddle
+
+        if isinstance(val, paddle.dtype):
+            paddle_to_ov_type = {
+                paddle.float32: ov.Type.f32,
+                paddle.float16: ov.Type.f16,
+                paddle.float64: ov.Type.f64,
+                paddle.bfloat16: ov.Type.bf16,
+                paddle.uint8: ov.Type.u8,
+                paddle.int8: ov.Type.i8,
+                paddle.int16: ov.Type.i16,
+                paddle.int32: ov.Type.i32,
+                paddle.int64: ov.Type.i64,
+                paddle.bool: ov.Type.boolean,
+            }
+
+            if val not in paddle_to_ov_type:
+                raise Exception("The provided data time is not supported {}.".format(val))
+
+            return paddle_to_ov_type[val]
+    raise Exception("Unexpected type object. Expected ov.Type, np.dtype, tf.dtypes.DType. Got {}".format(type(val)))
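
A hedged usage sketch for the type helpers above (not part of the diff; torch must already be imported for the torch branch to be taken, and the module path is inferred from the file location):

import numpy as np
import torch
import openvino as ov
from openvino.tools.ovc.moc_frontend.type_utils import is_type, to_ov_type

# torch dtypes are mapped through the torch_to_ov_type table defined above;
# plain numpy/python types are handed to openvino.runtime.Type directly.
assert to_ov_type(torch.float16) == ov.Type.f16
assert to_ov_type(np.float32) == ov.Type.f32
assert is_type(torch.int64)
assert not is_type(3.14)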
intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py
ADDED
@@ -0,0 +1,13 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# !/usr/bin/env python3
+
+import sys
+
+if __name__ == "__main__":
+    from openvino.tools.ovc.telemetry_utils import init_mo_telemetry
+    from openvino.tools.ovc.main import main
+
+    init_mo_telemetry()
+    sys.exit(main())
intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py
ADDED
@@ -0,0 +1,28 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+class Telemetry(object):
+    """
+    Stab file for the Telemetry class which is used when Telemetry class is not available.
+    """
+
+    def __init__(self, *arg, **kwargs):
+        pass
+
+    def send_event(self, *arg, **kwargs):
+        pass
+
+    def send_error(self, *arg, **kwargs):
+        pass
+
+    def start_session(self, *arg, **kwargs):
+        pass
+
+    def end_session(self, *arg, **kwargs):
+        pass
+
+    def force_shutdown(self, *arg, **kwargs):
+        pass
+
+    def send_stack_trace(self, *arg, **kwargs):
+        pass
intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py
ADDED
@@ -0,0 +1,118 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import numbers
+import os
+from openvino.runtime import get_version as get_rt_version  # pylint: disable=no-name-in-module,import-error
+from openvino.tools.ovc.cli_parser import get_params_with_paths_list
+from openvino.tools.ovc.telemetry_params import telemetry_params
+from openvino.tools.ovc.utils import check_values_equal
+
+try:
+    import openvino_telemetry as tm
+    from openvino_telemetry.backend import backend_ga4
+except ImportError:
+    import openvino.tools.ovc.telemetry_stub as tm
+
+
+def is_optimum():
+    import traceback
+    for frame_summary in traceback.extract_stack():
+        if os.path.join("optimum", "intel") in frame_summary.filename or \
+                os.path.join("optimum", "exporters", "openvino") in frame_summary.filename:
+            return True
+    return False
+
+
+def init_mo_telemetry(app_name='Model Conversion API', app_version=None):
+    app_version = app_version if app_version is not None else get_rt_version()
+    return init_telemetry_class(tid=get_tid(),
+                                app_name=app_name,
+                                app_version=app_version,
+                                backend='ga4',
+                                enable_opt_in_dialog=False,
+                                disable_in_ci=True
+                                )
+
+
+def init_telemetry_class(tid,
+                         app_name,
+                         app_version,
+                         backend,
+                         enable_opt_in_dialog,
+                         disable_in_ci):
+    # Init telemetry class
+    telemetry = tm.Telemetry(tid=tid,
+                             app_name=app_name,
+                             app_version=app_version,
+                             backend=backend,
+                             enable_opt_in_dialog=enable_opt_in_dialog,
+                             disable_in_ci=disable_in_ci)
+
+    # Telemetry is a singleton class and if it was already initialized in another tool
+    # some parameters will be incorrect, including app_name.
+    # In this case we need to force reinitialisation of telemetry.
+    if hasattr(telemetry, "backend") and telemetry.backend.app_name != app_name:
+        telemetry.init(tid=tid,
+                       app_name=app_name,
+                       app_version=app_version,
+                       backend=backend,
+                       enable_opt_in_dialog=enable_opt_in_dialog,
+                       disable_in_ci=disable_in_ci)
+    return telemetry
+
+
+def send_framework_info(framework: str):
+    """
+    This function sends information about used framework.
+    :param framework: framework name.
+    """
+    t = tm.Telemetry()
+    t.send_event('ovc', 'framework', framework)
+
+
+def get_tid():
+    """
+    This function returns the ID of the database to send telemetry.
+    """
+    return telemetry_params['TID']
+
+
+def send_conversion_result(conversion_result: str, need_shutdown=False):
+    t = tm.Telemetry()
+    t.send_event('ovc', 'conversion_result', conversion_result)
+    t.end_session('ovc')
+    if need_shutdown:
+        t.force_shutdown(1.0)
+
+
+def arg_to_str(arg):
+    # This method converts to string only known types, otherwise returns string with name of the type
+    from openvino.runtime import PartialShape, Shape, Type, Layout  # pylint: disable=no-name-in-module,import-error
+    if isinstance(arg, (PartialShape, Shape, Type, Layout)):
+        return str(arg)
+    if isinstance(arg, (str, numbers.Number, bool)):
+        return str(arg)
+    return str(type(arg))
+
+
+def send_params_info(argv: argparse.Namespace, cli_parser: argparse.ArgumentParser):
+    """
+    This function sends information about used command line parameters.
+    :param argv: command line parameters.
+    :param cli_parser: command line parameters parser.
+    """
+    t = tm.Telemetry()
+    params_with_paths = get_params_with_paths_list()
+    for arg in vars(argv):
+        arg_value = getattr(argv, arg)
+        if not check_values_equal(arg_value, cli_parser.get_default(arg)):
+            if arg in params_with_paths:
+                # If command line argument value is a directory or a path to file it is not sent
+                # as it may contain confidential information. "1" value is used instead.
+                param_str = arg + ":" + str(1)
+            else:
+                param_str = arg + ":" + arg_to_str(arg_value)
+
+            t.send_event('ovc', 'cli_parameters', param_str)
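
A small sketch of the argument sanitisation used by send_params_info above (not part of the diff; when openvino_telemetry is not installed, the telemetry_stub fallback makes every send a no-op):

from openvino.runtime import PartialShape
from openvino.tools.ovc.telemetry_utils import arg_to_str

# Only known value types are stringified; anything else is reduced to its type name,
# so arbitrary user data is never reported.
print(arg_to_str(PartialShape([1, 3, 224, 224])))  # e.g. "[1,3,224,224]"
print(arg_to_str(True))                            # "True"
print(arg_to_str({"a": 1}))                        # "<class 'dict'>"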
intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py
ADDED
@@ -0,0 +1,109 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+from typing import Iterable, Union
+
+import numpy as np
+from openvino.tools.ovc.error import Error
+
+try:
+    import openvino_telemetry as tm
+    from openvino_telemetry.backend import backend_ga4
+except ImportError:
+    import openvino.tools.ovc.telemetry_stub as tm
+
+dynamic_dimension = np.ma.masked
+
+
+def refer_to_faq_msg(question_num: int):
+    try:
+        t = tm.Telemetry()
+        t.send_event('ovc', 'error_info', "faq:" + str(question_num))
+    except Exception:
+        # Telemetry can be not initialized if it is used in MO IR Reader
+        pass
+
+    return '\n For more information please refer to Model Conversion API FAQ, question #{0}. ' \
+           '(https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_prepare_model_Model_Optimizer_FAQ.html' \
+           '?question={0}#question-{0})'.format(question_num)
+
+
+def get_mo_root_dir():
+    """
+    Return the absolute path to the Model Conversion API root directory (where mo folder is located)
+    :return: path to the MO root directory
+    """
+    return os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))), os.pardir))
+
+
+def check_values_equal(val1, val2):
+    # This method is needed to check equality of values where some values can be None
+    if val1 is None and val2 is None:
+        return True
+    if val1 is None:
+        return False
+    if val2 is None:
+        return False
+    return val1 == val2
+
+
+np_map_cast = {bool: lambda x: bool_cast(x),
+               np.int8: lambda x: np.int8(x),
+               np.int16: lambda x: np.int16(x),
+               np.int32: lambda x: np.int32(x),
+               np.int64: lambda x: np.int64(x),
+               np.uint8: lambda x: np.uint8(x),
+               np.uint16: lambda x: np.uint16(x),
+               np.uint32: lambda x: np.uint32(x),
+               np.uint64: lambda x: np.uint64(x),
+               np.float16: lambda x: np.float16(x),
+               np.float32: lambda x: np.float32(x),
+               np.double: lambda x: np.double(x),
+               str: lambda x: str(x)}
+
+
+def bool_cast(x):
+    if isinstance(x, str):
+        return False if x.lower() in ['false', '0'] else True if x.lower() in ['true', '1'] else 'unknown_boolean_cast'
+    else:
+        return bool(x)
+
+
+def mo_array(value: Union[Iterable[Union[float, int]], float, int], dtype=None) -> np.ndarray:
+    """
+    This function acts in a same way as np.array except for the case when dtype is not provided
+    and np.array return fp64 array this function returns fp32 array
+    """
+    x = np.array(value, dtype=dtype)
+    if not isinstance(value, np.ndarray) and x.dtype == np.float64 and dtype != np.float64:
+        x = x.astype(np.float32)
+    return x
+
+
+def validate_batch_in_shape(shape, layer_name: str):
+    """
+    Raises Error #39 if shape is not valid for setting batch size
+    Parameters
+    ----------
+    shape: current shape of layer under validation
+    layer_name: name of layer under validation
+    """
+    if len(shape) == 0 or (shape[0] is not dynamic_dimension and shape[0] not in (-1, 0, 1)):
+        raise Error(('The input layer {} has a shape {} defined in the model. \n\n' +
+                     'When you use "batch" option, Model Conversion API applies its value to the first ' +
+                     'element of the shape if it is equal to -1, 0 or 1. Otherwise, this is the ambiguous ' +
+                     'situation - it is not known in advance whether the layer has the batch ' +
+                     'dimension or not.\n\n For example, you want to set batch dimension equals 100 ' +
+                     'for the input layer "data" with shape (10,34). Although you can not use "batch", ' +
+                     'you should pass "input_shape=[100,34]" instead of "batch=100". \n\n' +
+                     'You can also specify batch dimension by setting "layout". \n\n')
+                    .format(layer_name, shape))
+
+
+def get_ir_version():
+    """
+    Default IR version.
+    :return: the IR version
+    """
+    return 11