bigdl-core-npu 2.6.0b20250114 (cp310-cp310-win_amd64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bigdl-core-npu/__init__.py +0 -0
- bigdl-core-npu/include/common.h +96 -0
- bigdl-core-npu/include/npu_llm.h +74 -0
- bigdl-core-npu/npu_llm.dll +0 -0
- bigdl-core-npu/npu_llm.lib +0 -0
- bigdl_core_npu-2.6.0b20250114.dist-info/METADATA +44 -0
- bigdl_core_npu-2.6.0b20250114.dist-info/RECORD +234 -0
- bigdl_core_npu-2.6.0b20250114.dist-info/WHEEL +5 -0
- bigdl_core_npu-2.6.0b20250114.dist-info/top_level.txt +2 -0
- intel_npu_acceleration_library/__init__.py +24 -0
- intel_npu_acceleration_library/_version.py +6 -0
- intel_npu_acceleration_library/backend/__init__.py +37 -0
- intel_npu_acceleration_library/backend/base.py +250 -0
- intel_npu_acceleration_library/backend/bindings.py +383 -0
- intel_npu_acceleration_library/backend/compression.py +24 -0
- intel_npu_acceleration_library/backend/convolution.py +58 -0
- intel_npu_acceleration_library/backend/factory.py +1161 -0
- intel_npu_acceleration_library/backend/linear.py +60 -0
- intel_npu_acceleration_library/backend/matmul.py +59 -0
- intel_npu_acceleration_library/backend/mlp.py +58 -0
- intel_npu_acceleration_library/backend/ops.py +142 -0
- intel_npu_acceleration_library/backend/qlinear.py +75 -0
- intel_npu_acceleration_library/backend/qmatmul.py +66 -0
- intel_npu_acceleration_library/backend/runtime.py +215 -0
- intel_npu_acceleration_library/backend/sdpa.py +107 -0
- intel_npu_acceleration_library/backend/tensor.py +1120 -0
- intel_npu_acceleration_library/backend/utils.py +70 -0
- intel_npu_acceleration_library/compiler.py +194 -0
- intel_npu_acceleration_library/device.py +230 -0
- intel_npu_acceleration_library/dtypes.py +155 -0
- intel_npu_acceleration_library/external/openvino/__init__.py +72 -0
- intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +21 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/experimental/__init__.py +14 -0
- intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
- intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +293 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/passes.py +65 -0
- intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +182 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +370 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +180 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +118 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +131 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +290 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +126 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +568 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +258 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +481 -0
- intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
- intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
- intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
- intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +28 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +5 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
- intel_npu_acceleration_library/external/openvino/properties/__init__.py +22 -0
- intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
- intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
- intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
- intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
- intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
- intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +19 -0
- intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3068 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +398 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +17 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +276 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +215 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +787 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +40 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +447 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +156 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
- intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +550 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +40 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +298 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +214 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +196 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
- intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
- intel_npu_acceleration_library/external/openvino/utils.py +115 -0
- intel_npu_acceleration_library/functional/__init__.py +8 -0
- intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
- intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
- intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
- intel_npu_acceleration_library/modelling.py +150 -0
- intel_npu_acceleration_library/nn/__init__.py +20 -0
- intel_npu_acceleration_library/nn/autograd.py +68 -0
- intel_npu_acceleration_library/nn/conv.py +257 -0
- intel_npu_acceleration_library/nn/functional.py +1207 -0
- intel_npu_acceleration_library/nn/linear.py +162 -0
- intel_npu_acceleration_library/nn/llm.py +417 -0
- intel_npu_acceleration_library/nn/module.py +393 -0
- intel_npu_acceleration_library/optimizations.py +157 -0
- intel_npu_acceleration_library/quantization.py +174 -0
intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py
@@ -0,0 +1,550 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import datetime
+import logging as log
+import os
+import sys
+import traceback
+import tracemalloc
+from collections import OrderedDict
+from pathlib import Path
+from typing import Iterable, Callable
+
+
+try:
+    import openvino_telemetry as tm
+    from openvino_telemetry.backend import backend_ga4
+except ImportError:
+    import openvino.tools.ovc.telemetry_stub as tm
+
+from openvino.tools.ovc.moc_frontend.check_config import any_extensions_used
+from openvino.tools.ovc.moc_frontend.pipeline import moc_pipeline
+from openvino.tools.ovc.moc_frontend.moc_emit_ir import moc_emit_ir
+from openvino.tools.ovc.moc_frontend.type_utils import to_ov_type
+from openvino.tools.ovc.cli_parser import get_available_front_ends, get_common_cli_options, depersonalize, \
+    get_mo_convert_params, input_to_input_cut_info, parse_inputs
+from openvino.tools.ovc.help import get_convert_model_help_specifics
+
+from openvino.tools.ovc.error import Error, FrameworkError
+from openvino.tools.ovc.get_ov_update_message import get_compression_message
+from openvino.tools.ovc.version import VersionChecker
+from openvino.tools.ovc.utils import check_values_equal
+from openvino.tools.ovc.logger import init_logger
+from openvino.tools.ovc.telemetry_utils import send_params_info, send_conversion_result, \
+    init_mo_telemetry
+from openvino.tools.ovc.moc_frontend.pytorch_frontend_utils import get_pytorch_decoder, extract_input_info_from_example
+from openvino.tools.ovc.moc_frontend.paddle_frontend_utils import paddle_frontend_converter
+try:
+    from openvino.tools.ovc.moc_frontend.jax_frontend_utils import get_jax_decoder
+except:
+    get_jax_decoder = None
+
+# pylint: disable=no-name-in-module,import-error
+from openvino.frontend import FrontEndManager, OpConversionFailure, TelemetryExtension
+from openvino.runtime import get_version as get_rt_version
+from openvino.runtime import PartialShape
+
+try:
+    from openvino.frontend.tensorflow.utils import create_tf_graph_iterator, type_supported_by_tf_fe, \
+        extract_model_graph  # pylint: disable=no-name-in-module,import-error
+
+    tf_frontend_with_python_bindings_installed = True
+except (ModuleNotFoundError, ImportError):
+    tf_frontend_with_python_bindings_installed = False
+
+
+def replace_ext(name: str, old: str, new: str):
+    base, ext = os.path.splitext(name)
+    log.debug("base: {}, ext: {}".format(base, ext))
+    if ext == old:
+        return base + new
+
+
+def print_argv(argv: argparse.Namespace):
+    print('Model Conversion arguments:')
+    props = OrderedDict()
+    props['common_args'] = get_common_cli_options(argv, argv.is_python_api_used)
+
+    framework_specifics_map = {
+        'common_args': 'Common parameters:'
+    }
+
+    lines = []
+    for key in props:
+        lines.append(framework_specifics_map[key])
+        for (op, desc) in props[key].items():
+            if isinstance(desc, list):
+                lines.append('\t{}: \t{}'.format(desc[0], desc[1](getattr(argv, op, 'NONE'))))
+            else:
+                lines.append('\t{}: \t{}'.format(desc, getattr(argv, op, 'NONE')))
+    print('\n'.join(lines), flush=True)
+
+
+def check_iterable(iterable: Iterable, func: Callable):
+    for element in iterable:
+        if not func(element):
+            return False
+    return True
+
+
+def arguments_post_parsing(argv: argparse.Namespace):
+    # TODO: This function looks similar to another one. Check for code duplicates.
+    log.debug("Model Conversion API started")
+    if not argv.is_python_api_used:
+        log.debug('Output model name would be {}{{.xml, .bin}}'.format(argv.output_model))
+
+    if is_verbose(argv):
+        print_argv(argv)
+
+    import re
+    if argv.is_python_api_used and isinstance(argv.input, str):
+        argv.input = [argv.input]
+
+    if not argv.is_python_api_used and isinstance(argv.input, str):
+        argv.input = parse_inputs(argv.input)
+
+    normalize_inputs(argv)
+    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))
+
+    if not hasattr(argv, 'output') or argv.output is None:
+        return argv
+
+    if argv.is_python_api_used:
+        error_msg = f"output '{argv.output}' is incorrect, it should be string or a list/tuple of strings"
+        assert isinstance(argv.output, (str, list, tuple)), error_msg
+        if isinstance(argv.output, list):
+            assert check_iterable(argv.output, lambda x: isinstance(x, str)), error_msg
+        else:
+            argv.output = [argv.output]
+    else:
+        assert isinstance(argv.output, str)
+
+        error_msg = f"output '{argv.output}' is incorrect, output names should not be empty or contain spaces"
+        processed_output = re.split(r'\s*,\s*', argv.output.strip())
+        assert check_iterable(processed_output, lambda x: x.find(' ') == -1), error_msg
+        assert check_iterable(processed_output, lambda x: len(x) > 0), error_msg
+        argv.output = processed_output
+    return argv
+
+
+def get_moc_frontends(argv: argparse.Namespace):
+    fem = argv.feManager
+
+    if not fem:
+        return None, []
+
+    available_moc_front_ends = get_available_front_ends(fem)
+    if argv.framework:
+        moc_front_end = fem.load_by_framework(argv.framework)
+        return moc_front_end, available_moc_front_ends
+    if argv.input_model:
+        if isinstance(argv.input_model, (tuple, list)) and len(argv.input_model) == 2:
+            moc_front_end = fem.load_by_model(
+                [argv.input_model[0], argv.input_model[1]])  # TODO: Pass all input model parts
+        else:
+            moc_front_end = fem.load_by_model(argv.input_model)
+        if not moc_front_end:
+            return None, available_moc_front_ends
+        argv.framework = moc_front_end.get_name()
+    else:
+        return None, []
+
+    # This check as a workaround to skip IR frontend
+    if not moc_front_end.get_name() in available_moc_front_ends:
+        return None, available_moc_front_ends
+
+    return moc_front_end, available_moc_front_ends
+
+
+def filtered_extensions(extensions):
+    try:
+        new_extensions = []
+        from openvino.frontend.pytorch.module_extension import ModuleExtension
+        for ext in extensions:
+            if not isinstance(ext, ModuleExtension):
+                new_extensions.append(ext)
+        return new_extensions
+    except:
+        return extensions
+
+
+def prepare_ir(argv: argparse.Namespace):
+    argv = arguments_post_parsing(argv)
+    t = tm.Telemetry()
+
+    if isinstance(argv.input_model, (tuple, list)) and len(argv.input_model) == 1:
+        argv.input_model = argv.input_model[0]
+
+    moc_front_end, available_moc_front_ends = get_moc_frontends(argv)
+    if moc_front_end:
+        # TODO: Should be moved to the same place where paddle and pytorch handle their objects
+        if argv.framework == 'tf' and argv.is_python_object and type_supported_by_tf_fe(argv.input_model):
+            argv.input_model = create_tf_graph_iterator(argv.input_model,
+                                                        argv.placeholder_shapes,
+                                                        argv.placeholder_data_types,
+                                                        getattr(argv, "example_input", None),
+                                                        argv.share_weights)
+        t.send_event("ovc", "conversion_method", moc_front_end.get_name() + "_frontend")
+        moc_front_end.add_extension(TelemetryExtension("ovc", t.send_event, t.send_error, t.send_stack_trace))
+        if any_extensions_used(argv):
+            for extension in filtered_extensions(argv.extension):
+                moc_front_end.add_extension(extension)
+        ov_model = moc_pipeline(argv, moc_front_end)
+        return ov_model
+
+    if not argv.input_model:
+        raise Error('No input model is provided')
+
+    raise Error('Cannot recognize input model.')
+
+
+def check_model_object(argv):
+    model = argv['input_model']
+    if 'tensorflow' in sys.modules:
+        if tf_frontend_with_python_bindings_installed and extract_model_graph(argv):
+            return "tf"
+    if 'torch' in sys.modules:
+        import torch
+        if isinstance(model, (torch.nn.Module, torch.jit.ScriptFunction)) or (hasattr(torch, "export") and isinstance(model, (torch.export.ExportedProgram))):
+            return "pytorch"
+        try:
+            from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder
+            from openvino.frontend.pytorch.fx_decoder import TorchFXPythonDecoder
+
+            if isinstance(model, (TorchScriptPythonDecoder, TorchFXPythonDecoder)):
+                return "pytorch"
+        except Exception as e:
+            pass
+
+    import io
+    # FIXME: Consuming any io.BytesIO object as an ONNX model is too dengerous and
+    # can conflict with others in the future (not future proof).
+    # TODO: Refer to https://onnx.ai/onnx/intro/python.html to find examples with
+    # real ONNX python objects. ONNX model has onnx.onnx_ml_pb2.ModelProto type.
+    if isinstance(model, io.BytesIO):
+        return 'onnx'
+
+    if 'paddle' in sys.modules:
+        import paddle
+        if isinstance(model, paddle.hapi.model.Model) or isinstance(model,
+                                                                    paddle.fluid.dygraph.layers.Layer) or isinstance(
+                model, paddle.fluid.executor.Executor):
+            return "paddle"
+
+    if 'jax' in sys.modules:
+        import jax
+        if isinstance(model, (jax.core.Jaxpr, jax.core.ClosedJaxpr)):
+            return "jax"
+
+    raise Error('Unknown model type: {}'.format(type(model)))
+
+
+def driver(argv: argparse.Namespace, non_default_params: dict):
+    init_logger('ERROR', argv.verbose)
+
+    # Log dictionary with non-default cli parameters where complex classes are excluded.
+    log.debug(str(non_default_params))
+
+    ov_model = moc_emit_ir(prepare_ir(argv), argv)
+
+    return ov_model
+
+
+def get_non_default_params(argv, cli_parser):
+    import numbers
+    import inspect
+    from openvino.tools.ovc import convert_model
+
+    signature = inspect.signature(convert_model)
+    # make dictionary with parameters which have non-default values to be serialized in IR in rt_info
+    non_default_params = {}
+    for arg, arg_value in vars(argv).items():
+        if arg in signature.parameters and check_values_equal(arg_value, signature.parameters[arg].default):
+            continue
+        if check_values_equal(arg_value, cli_parser.get_default(arg)):
+            continue
+        value = depersonalize(arg_value, arg)
+        # Skip complex classes in params to prevent
+        # serializing it to rt_info
+        if isinstance(value, (str, bool, numbers.Number)):
+            non_default_params[arg] = value
+    return non_default_params
+
+
+def add_line_breaks(text: str, char_num: int, line_break: str):
+    words = text.replace('\n', "\n ").split(" ")
+    cnt = 0
+    for i, w in enumerate(words):
+        cnt += len(w)
+        if '\n' in w:
+            cnt = len(w) - w.find('\n') - 1
+        if cnt > char_num:
+            if words[i][-1] not in ['\n', '\t']:
+                words[i] = w + '\n'
+            cnt = 0
+    text = ' '.join(words).replace("\n ", "\n")
+    return line_break + text.replace("\n", line_break)
+
+
+def show_mo_convert_help():
+    mo_convert_params = get_mo_convert_params()
+    for group_name, group in mo_convert_params.items():
+        print(group_name)
+        for param_name in group:
+            param_data = group[param_name]
+            text = param_data.description.replace("    ", '')
+            text = add_line_breaks(text, 56, "\n\t\t\t")
+            print("  :param {} {}".format(param_name, text))
+        print()
+
+
+def input_model_is_object(input_model):
+    if input_model == ():
+        return False
+    if isinstance(input_model, (str, Path)):
+        return False
+    if isinstance(input_model, (tuple, list)):
+        return all(input_model_is_object(part) for part in input_model)
+    return True
+
+
+def normalize_inputs(argv: argparse.Namespace):
+    """
+    repacks params passed to convert_model and wraps resulting values into dictionaries or lists.
+    After working of this method following values are set in argv:
+
+    argv.input, argv.inputs_list - list of input names. Both values are used in some parts of MO.
+    Could be good to refactor it and use only one of these values.
+
+    argv.placeholder_shapes - dictionary where key is node name, value is PartialShape,
+    or list of PartialShape if node names were not set.
+
+    argv.placeholder_data_types - dictionary where key is node name, value is node np.type,
+    or list of np.types if node names were not set.
+
+    :param argv: MO arguments
+    """
+    # Parse input to list of InputCutInfo
+    inputs = input_to_input_cut_info(argv.input)
+    argv.input = inputs
+
+    # Make list of input names
+    input_names_list = []
+    for inp in inputs:
+        if inp.name is not None:
+            input_names_list.append(inp.name)
+    if len(input_names_list) > 0:
+        assert len(input_names_list) == len(inputs), "\"input\" parameter has unnamed inputs and named inputs. " \
+                                                     "Please either set names for all inputs, " \
+                                                     "or do not set names for all inputs."
+
+    if len(input_names_list) > 0:
+        # Named inputs case
+        shape_dict = {}
+        data_type_dict = {}
+        for inp in inputs:
+            if inp.shape is not None:
+                # Wrap shape to PartialShape for uniformity of stored values
+                shape_dict[inp.name] = PartialShape(inp.shape)
+            else:
+                shape_dict[inp.name] = None
+            if inp.type is not None:
+                # Convert type to ov.Type for uniformity of stored values
+                data_type_dict[inp.name] = to_ov_type(inp.type)
+        argv.placeholder_shapes = shape_dict if shape_dict else None
+        argv.placeholder_data_types = data_type_dict if data_type_dict else {}
+    else:
+        # Unnamed inputs case
+        shape_list = []
+        data_type_list = []
+        for inp in inputs:
+            if inp.shape is not None:
+                # Wrap shape to PartialShape for uniformity of stored values
+                shape_list.append(PartialShape(inp.shape))
+            if inp.type is not None:
+                # Convert type to ov.Type for uniformity of stored values
+                data_type_list.append(to_ov_type(inp.type))
+        argv.placeholder_shapes = shape_list if shape_list else None
+        argv.placeholder_data_types = data_type_list if data_type_list else {}
+    if hasattr(argv, "framework") and argv.framework == "pytorch" and getattr(argv, "example_input", None) is not None:
+        extract_input_info_from_example(argv, inputs)
+
+
+def args_to_argv(**kwargs):
+    argv = argparse.Namespace()
+    args_specifics = get_convert_model_help_specifics()
+
+    import inspect
+    from openvino.tools.ovc import convert_model
+    signature = inspect.signature(convert_model)
+    for key, value in kwargs.items():
+        if value is None and key in signature.parameters:
+            setattr(argv, key, signature.parameters[key].default)
+            continue
+        if key in args_specifics:
+            param_specifics = args_specifics[key]
+            if 'action' in param_specifics and hasattr(param_specifics['action'], 'check_value'):
+                value = param_specifics['action'].check_value(value, key)
+            if 'type' in param_specifics:
+                value = param_specifics['type'](value)
+        setattr(argv, key, value)
+    return argv
+
+
+def pack_params_to_args_namespace(args: dict, cli_parser: argparse.ArgumentParser, python_api_used):
+    if python_api_used:
+        argv = args_to_argv(**args)
+
+        # get list of all available params for convert_model()
+        all_params = {}
+        for key, value in get_mo_convert_params().items():
+            all_params.update(value)
+
+        # check that there are no unknown params provided
+        for key, value in args.items():
+            if key not in all_params.keys():
+                raise Error("Unrecognized argument: {}".format(key))
+    else:
+        argv = cli_parser.parse_args()
+    return argv
+
+
+def is_verbose(argv, args=None):
+    if argv is not None and hasattr(argv, 'verbose') and argv.verbose:
+        return True
+    if args is not None and 'verbose' in args and args['verbose']:
+        return True
+    if '--verbose' in sys.argv:
+        return True
+    return False
+
+
+def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used):
+    start_time = datetime.datetime.now()
+    if is_verbose(None, args):
+        tracemalloc.start()
+
+    simplified_ie_version = VersionChecker().get_ie_simplified_version()
+    telemetry = init_mo_telemetry()
+    telemetry.start_session('ovc')
+    telemetry.send_event('ovc', 'version', simplified_ie_version)
+    # Initialize logger with 'ERROR' as default level to be able to form nice messages
+    # before arg parser deliver log_level requested by user
+    init_logger('ERROR', False)
+    argv = None
+    # Minimize modifications among other places in case if multiple pieces are passed as input_model
+    if python_api_used:
+        if 'input_model' not in args:
+            args['input_model'] = ()
+        if isinstance(args['input_model'], (tuple, list)) and len(args['input_model']) == 1:
+            args['input_model'] = args['input_model'][0]
+    try:
+        model_framework = None
+        inp_model_is_object = input_model_is_object(args['input_model']) if python_api_used else False
+
+        if inp_model_is_object:
+            model_framework = check_model_object(args)
+            if model_framework == "pytorch":
+                example_inputs = None
+                if 'example_input' in args and args['example_input'] is not None:
+                    example_inputs = args['example_input']
+                elif 'example_inputs' in args:
+                    raise AssertionError(
+                        "'example_inputs' argument is not recognized, maybe you meant to provide 'example_input'?")
+
+                get_pytorch_decoder(args['input_model'], example_inputs, args)
+            if model_framework == "paddle":
+                example_inputs = None
+                if 'example_input' in args and args['example_input'] is not None:
+                    example_inputs = args['example_input']
+
+                outputs = None
+                if 'output' in args and args['output'] is not None:
+                    # Once the temporary PDPD model is generated. output can be dropped.
+                    # Just swap outputs and args['output'] can reset the argv.output to `None`.
+                    # It can avoid the following `output` negative effect.
+                    outputs, args['output'] = args['output'], outputs
+                paddle_runtime_converter = paddle_frontend_converter(args['input_model'], example_inputs,
+                                                                     outputs)
+                pdmodel = paddle_runtime_converter.convert_paddle_to_pdmodel()
+                args['input_model'] = pdmodel
+            if model_framework == "jax":
+                if get_jax_decoder is not None:
+                    get_jax_decoder(args['input_model'], args)
+                else:
+                    raise Error("JAX Frontend is not available.")
+
+
+        argv = pack_params_to_args_namespace(args, cli_parser, python_api_used)
+        argv.framework = model_framework
+        argv.is_python_object = inp_model_is_object
+
+        argv.feManager = FrontEndManager()
+
+        # send telemetry with params info
+        send_params_info(argv, cli_parser)
+
+        non_default_params = get_non_default_params(argv, cli_parser)
+        argv.is_python_api_used = python_api_used
+
+        argv.framework = model_framework
+
+        ov_model = driver(argv, {"conversion_parameters": non_default_params})
+
+        if inp_model_is_object and model_framework == "paddle":
+            if paddle_runtime_converter:
+                paddle_runtime_converter.destroy()
+
+        # add MO meta data to model
+        ov_model.set_rt_info(get_rt_version(), "Runtime_version")
+        for key, value in non_default_params.items():
+            ov_model.set_rt_info(str(value), ["conversion_parameters", str(key)])
+
+        if is_verbose(argv) or not python_api_used:
+            if 'compress_to_fp16' in argv and argv.compress_to_fp16:
+                print(get_compression_message())
+
+        send_conversion_result('success')
+
+        if is_verbose(argv):
+            elapsed_time = datetime.datetime.now() - start_time
+            print('[ SUCCESS ] Total execution time: {:.2f} seconds. '.format(elapsed_time.total_seconds()))
+
+            _, peak_size = tracemalloc.get_traced_memory()
+            print("[ SUCCESS ] Peak memory consumption (includes only memory allocated in Python): {:.2f} MB. ".format(
+                peak_size / (1024 * 1024)))
+            tracemalloc.stop()
+
+        return ov_model, argv
+
+    except Exception as e:
+        if is_verbose(argv) or not python_api_used:
+            if isinstance(e, (FileNotFoundError, NotADirectoryError)):
+                log.error('File {} was not found'.format(str(e).split('No such file or directory:')[1]))
+                log.debug(traceback.format_exc())
+            elif isinstance(e, (Error, OpConversionFailure)):
+                log.error(e)
+                log.debug(traceback.format_exc())
+            elif isinstance(e, FrameworkError):
+                log.error(e, extra={'framework_error': True})
+                log.debug(traceback.format_exc())
+            else:
+                log.error("-------------------------------------------------")
+                log.error("----------------- INTERNAL ERROR ----------------")
+                log.error("Unexpected exception happened.")
+                log.error("Please verify parameters and environment.")
+                log.error("If you think this is a bug, please create new ticket here: ")
+                log.error("https://github.com/openvinotoolkit/openvino/issues.")
+                log.error("-------------- DETAILED INFORMATION -------------")
+                log.error(str(e))
+                log.error(traceback.format_exc())
+                log.error("----------------- END OF REPORT -----------------")
+                log.error("-------------------------------------------------")
+
+        send_conversion_result('fail')
+        if python_api_used:
+            raise e
+        else:
+            return None, argv
intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import sys
+
+# do not print INFO and WARNING messages from TensorFlow
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+
+
+def get_imported_module_version(imported_module):
+    """
+    Get imported module version
+    :return: version(str) or raise AttributeError exception
+    """
+    version_attrs = ("__version__", "VERSION", "version")
+    installed_version = None
+    for attr in version_attrs:
+        installed_version = getattr(imported_module, attr, None)
+        if isinstance(installed_version, str):
+            return installed_version
+        else:
+            installed_version = None
+
+    if installed_version is None:
+        raise AttributeError("{} module doesn't have version attribute".format(imported_module))
+    else:
+        return installed_version
+
+
+def get_environment_setup(framework):
+    """
+    Get environment setup such as Python version, TensorFlow version
+    :param framework: framework name
+    :return: a dictionary of environment variables
+    """
+    env_setup = dict()
+    python_version = "{}.{}.{}".format(sys.version_info.major,
+                                       sys.version_info.minor,
+                                       sys.version_info.micro)
+    env_setup['python_version'] = python_version
+    try:
+        if framework == 'tf':
+            exec("import tensorflow")
+            env_setup['tensorflow'] = get_imported_module_version(sys.modules["tensorflow"])
+            exec("del tensorflow")
+    except (AttributeError, ImportError):
+        pass
+    env_setup['sys_platform'] = sys.platform
+    return env_setup
intel_npu_acceleration_library/external/openvino/tools/ovc/error.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import re
+
+
+class BasicError(Exception):
+    """ Base class for all exceptions in Model Conversion API
+
+        It operates like Exception but when it is converted to str,
+        it formats string as args[0].format(*args[1:]), where
+        args are arguments provided when an exception instance is
+        created.
+    """
+
+    def __str__(self):
+        if len(self.args) <= 1:
+            return Exception.__str__(self)
+        return self.args[0].format(*self.args[1:])  # pylint: disable=unsubscriptable-object
+
+
+class FrameworkError(BasicError):
+    """ User-friendly error: raised when the error on the framework side. """
+    pass
+
+
+class Error(BasicError):
+    """ User-friendly error: raised when the error on the user side. """
+    pass
+
+
+class InternalError(BasicError):
+    """ Not user-friendly error: user cannot fix it and it points to the bug inside MO. """
+    pass
+
+
+def classify_error_type(e):
+    patterns = [
+        # Example: No module named 'openvino._offline_transformations.offline_transformations_api'
+        r"No module named \'\S+\'",
+        # Example: cannot import name 'IECore' from 'openvino.inference_engine' (unknown location)
+        r"cannot import name \'\S+\'",
+    ]
+    error_message = str(e)
+    for pattern in patterns:
+        m = re.search(pattern, error_message)
+        if m:
+            return m.group(0)
+    return "undefined"
intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py
@@ -0,0 +1,16 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import datetime
+
+msg_fmt = 'Check for a new version of Intel(R) Distribution of OpenVINO(TM) toolkit here {0} ' \
+          'or on https://github.com/openvinotoolkit/openvino'
+
+
+def get_compression_message():
+    link = "https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html"
+    message = '[ INFO ] Generated IR will be compressed to FP16. ' \
+              'If you get lower accuracy, please consider disabling compression ' \
+              'by removing argument "compress_to_fp16" or set it to false "compress_to_fp16=False".\n' \
+              'Find more information about compression to FP16 at {}'.format(link)
+    return message
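For orientation: the convert_impl.py hunk above is the backend of OpenVINO's convert_model() entry point (_convert() calls driver(), which calls prepare_ir()), and this wheel also bundles the NPU plugin (openvino_intel_npu_plugin.dll). The sketch below is a hypothetical usage example, not part of the packaged sources; it assumes a standard openvino and torch environment, and the model, shapes, and output file name are placeholders.

```python
# Hypothetical usage sketch (not contained in this package): exercises the
# convert_model() entry point whose backend (_convert/driver/prepare_ir) is
# shown in the convert_impl.py hunk above.
import torch
import openvino as ov

model = torch.nn.Linear(128, 64).eval()   # placeholder PyTorch module
example = torch.randn(1, 128)             # placeholder example input

# check_model_object() classifies a torch.nn.Module as "pytorch", and the
# PyTorch frontend traces it using the example input.
ov_model = ov.convert_model(model, example_input=example)

# driver() returns an openvino.Model; save_model() writes IR and compresses
# weights to FP16 by default (see get_compression_message() above).
ov.save_model(ov_model, "linear.xml")

# With the bundled openvino_intel_npu_plugin.dll and an NPU driver installed,
# the model can be compiled for the "NPU" device.
compiled = ov.compile_model(ov_model, "NPU")
print(compiled(example.numpy()))
```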