bigdl-core-npu 2.5.0 (bigdl_core_npu-2.5.0-cp310-cp310-win_amd64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bigdl_core_npu-2.5.0.dist-info/METADATA +35 -0
- bigdl_core_npu-2.5.0.dist-info/RECORD +223 -0
- bigdl_core_npu-2.5.0.dist-info/WHEEL +5 -0
- bigdl_core_npu-2.5.0.dist-info/top_level.txt +1 -0
- intel_npu_acceleration_library/__init__.py +24 -0
- intel_npu_acceleration_library/_version.py +6 -0
- intel_npu_acceleration_library/backend/__init__.py +37 -0
- intel_npu_acceleration_library/backend/base.py +215 -0
- intel_npu_acceleration_library/backend/bindings.py +279 -0
- intel_npu_acceleration_library/backend/compression.py +24 -0
- intel_npu_acceleration_library/backend/convolution.py +58 -0
- intel_npu_acceleration_library/backend/factory.py +944 -0
- intel_npu_acceleration_library/backend/linear.py +60 -0
- intel_npu_acceleration_library/backend/matmul.py +59 -0
- intel_npu_acceleration_library/backend/mlp.py +58 -0
- intel_npu_acceleration_library/backend/ops.py +141 -0
- intel_npu_acceleration_library/backend/qlinear.py +71 -0
- intel_npu_acceleration_library/backend/qmatmul.py +66 -0
- intel_npu_acceleration_library/backend/runtime.py +210 -0
- intel_npu_acceleration_library/backend/sdpa.py +107 -0
- intel_npu_acceleration_library/backend/tensor.py +1050 -0
- intel_npu_acceleration_library/backend/utils.py +70 -0
- intel_npu_acceleration_library/compiler.py +194 -0
- intel_npu_acceleration_library/device.py +230 -0
- intel_npu_acceleration_library/dtypes.py +122 -0
- intel_npu_acceleration_library/external/openvino/__init__.py +71 -0
- intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +20 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
- intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +352 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +139 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +98 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +119 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +289 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +118 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +536 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +256 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +460 -0
- intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
- intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
- intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
- intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +26 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +4 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
- intel_npu_acceleration_library/external/openvino/properties/__init__.py +21 -0
- intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
- intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
- intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
- intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
- intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
- intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +18 -0
- intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3067 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +399 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +10 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +85 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +189 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +783 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +38 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +429 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +70 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
- intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +536 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +35 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +246 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +205 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +109 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
- intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
- intel_npu_acceleration_library/external/openvino/utils.py +98 -0
- intel_npu_acceleration_library/functional/__init__.py +8 -0
- intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
- intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
- intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
- intel_npu_acceleration_library/modelling.py +150 -0
- intel_npu_acceleration_library/nn/__init__.py +20 -0
- intel_npu_acceleration_library/nn/autograd.py +68 -0
- intel_npu_acceleration_library/nn/conv.py +257 -0
- intel_npu_acceleration_library/nn/functional.py +1207 -0
- intel_npu_acceleration_library/nn/linear.py +162 -0
- intel_npu_acceleration_library/nn/llm.py +417 -0
- intel_npu_acceleration_library/nn/module.py +393 -0
- intel_npu_acceleration_library/optimizations.py +157 -0
- intel_npu_acceleration_library/quantization.py +174 -0
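The inventory above (a vendored copy of the OpenVINO Python runtime, frontend .pyd binaries for CPython 3.8-3.12, and the Release DLLs) can be reproduced locally from the downloaded wheel using only the Python standard library. A minimal sketch, assuming the wheel filename matches the distribution shown in the title:

    import zipfile

    # Assumed local filename; wheels are ordinary zip archives.
    WHEEL = "bigdl_core_npu-2.5.0-cp310-cp310-win_amd64.whl"

    with zipfile.ZipFile(WHEEL) as whl:
        for info in whl.infolist():
            # Mirrors the listing above: archive path plus uncompressed size.
            print(f"{info.filename}  {info.file_size} bytes")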
intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py
@@ -0,0 +1,199 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import sys, argparse
+
+from openvino.tools.benchmark.utils.utils import show_available_devices
+
+INPUT_OUTPUT_PRECISION_CHOICES = [
+    'bool', \
+    'f16', 'f32', 'f64', \
+    'i8', 'i16', 'i32', 'i64', \
+    'u8', 'u16', 'u32', 'u64']
+
+def str2bool(v):
+    if v.lower() in ('yes', 'true', 't', 'y', '1'):
+        return True
+    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
+        return False
+    else:
+        raise argparse.ArgumentTypeError('Boolean value expected.')
+
+def check_positive(value):
+    ivalue = int(value)
+    if ivalue <= 0:
+        raise argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
+    return ivalue
+
+class print_help(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        parser.print_help()
+        show_available_devices()
+        sys.exit()
+class HelpFormatterWithLines(argparse.HelpFormatter):
+    def _split_lines(self, text, width):
+        lines = super()._split_lines(text, width)
+        lines += ['']
+        if "simple JSON file" not in text:
+            return lines
+        lines = text.split('\n')
+        return lines
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        conflict_handler='resolve', # Allow dla_benchmark for FPGA to override show_available_devices() for --help
+        add_help=False, formatter_class=HelpFormatterWithLines)
+    args = parser.add_argument_group('Options')
+    args.add_argument('-h', '--help', action=print_help, nargs='?', default=argparse.SUPPRESS,
+                      help='Show this help message and exit.')
+    args.add_argument('-i', '--paths_to_input', action='append', nargs='+', type=str, required=False,
+                      help='Optional. '
+                           'Path to a folder with images and/or binaries or to specific image or binary file.'
+                           'It is also allowed to map files to model inputs: '
+                           'input_1:file_1/dir1,file_2/dir2,input_4:file_4/dir4 input_2:file_3/dir3 '
+                           'Currently supported data types: bin, npy. If OPENCV is enabled, this functionality'
+                           'is extended with the following data types: bmp, dib, jpeg, jpg, jpe, jp2, png, pbm, '
+                           'pgm, ppm, sr, ras, tiff, tif.')
+    args.add_argument('-m', '--path_to_model', type=str, required=True,
+                      help='Required. Path to an .xml/.onnx file with a trained model or '
+                           'to a .blob file with a trained compiled model.')
+    args.add_argument('-d', '--target_device', type=str, required=False, default='CPU',
+                      help='Optional. Specify a target device to infer on (the list of available devices is shown below). '
+                           'Default value is CPU. Use \'-d HETERO:<comma separated devices list>\' format to specify HETERO plugin. '
+                           'Use \'-d MULTI:<comma separated devices list>\' format to specify MULTI plugin. '
+                           'The application looks for a suitable plugin for the specified device.')
+    args.add_argument('-hint', '--perf_hint', type=str, required=False, default='', choices=('throughput', 'tput', 'cumulative_throughput', 'ctput', 'latency', 'none'),
+                      help='Optional. Performance hint (latency or throughput or cumulative_throughput or none). Performance hint allows the OpenVINO device to select the right model-specific settings.\n'
+                           '\'throughput\': device performance mode will be set to THROUGHPUT. \n'
+                           '\'cumulative_throughput\': device performance mode will be set to CUMULATIVE_THROUGHPUT. \n'
+                           '\'latency\': device performance mode will be set to LATENCY. \n'
+                           '\'none\': no device performance mode will be set. \n'
+                           'Using explicit \'nstreams\' or other device-specific options, please set hint to \'none\'')
+    args.add_argument('-niter', '--number_iterations', type=check_positive, required=False, default=None,
+                      help='Optional. Number of iterations. '
+                           'If not specified, the number of iterations is calculated depending on a device.')
+    args.add_argument('-t', '--time', type=check_positive, required=False, default=None,
+                      help='Optional. Time in seconds to execute topology.')
+
+    shapes = parser.add_argument_group('Input shapes')
+    shapes.add_argument('-b', '--batch_size', type=str, required=False, default='',
+                        help='Optional. ' +
+                             'Batch size value. ' +
+                             'If not specified, the batch size value is determined from Intermediate Representation')
+    shapes.add_argument('-shape', type=str, required=False, default='',
+                        help='Optional. '
+                             'Set shape for input. For example, "input1[1,3,224,224],input2[1,4]" or "[1,3,224,224]" in case of one input size. '
+                             'This parameter affect model Parameter shape, can be dynamic. For dynamic dimesions use symbol `?`, `-1` or range `low.. up`.')
+    shapes.add_argument('-data_shape', type=str, required=False, default='',
+                        help='Optional. '
+                             'Optional if model shapes are all static (original ones or set by -shape).'
+                             'Required if at least one input shape is dynamic and input images are not provided.'
+                             'Set shape for input tensors. For example, "input1[1,3,224,224][1,3,448,448],input2[1,4][1,8]" or "[1,3,224,224][1,3,448,448] in case of one input size.')
+    shapes.add_argument('-layout', type=str, required=False, default='',
+                        help='Optional. '
+                             'Prompts how model layouts should be treated by application. '
+                             'For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size.')
+
+    advs = parser.add_argument_group('Advanced options')
+    advs.add_argument('-extensions', '--extensions', type=str, required=False, default=None,
+                      help='Optional. Path or a comma-separated list of paths to libraries (.so or .dll) with extensions.')
+    advs.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
+                      help='Optional. Required for GPU custom kernels. Absolute path to an .xml file with the '
+                           'kernels description.')
+    advs.add_argument('-cdir', '--cache_dir', type=str, required=False, default='',
+                      help="Optional. Enable model caching to specified directory")
+    advs.add_argument('-lfile', '--load_from_file', required=False, nargs='?', default=argparse.SUPPRESS,
+                      help="Optional. Loads model from file directly without read_model.")
+    args.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
+                      help='Optional. Enable using sync/async API. Default value is async.')
+    advs.add_argument('-nireq', '--number_infer_requests', type=check_positive, required=False, default=0,
+                      help='Optional. Number of infer requests. Default value is determined automatically for device.')
+    advs.add_argument('-nstreams', '--number_streams', type=str, required=False, default=None,
+                      help='Optional. Number of streams to use for inference on the CPU/GPU '
+                           '(for HETERO and MULTI device cases use format <device1>:<nstreams1>,<device2>:<nstreams2> '
+                           'or just <nstreams>). '
+                           'Default value is determined automatically for a device. Please note that although the automatic selection '
+                           'usually provides a reasonable performance, it still may be non - optimal for some cases, especially for very small models. '
+                           'Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency '
+                           'estimations the number of streams should be set to 1. '
+                           'See samples README for more details.')
+    advs.add_argument('-inference_only', '--inference_only', type=str2bool, required=False, default=None, nargs='?', const=True,
+                      help='Optional. If true inputs filling only once before measurements (default for static models), '
+                           'else inputs filling is included into loop measurement (default for dynamic models)', )
+    advs.add_argument('-infer_precision', type=str, required=False,
+                      help='Optional. Specifies the inference precision. Example #1: \'-infer_precision bf16\'. Example #2: \'-infer_precision CPU:bf16,GPU:f32\'')
+
+    prpr = parser.add_argument_group('Preprocessing options')
+    prpr.add_argument('-ip', '--input_precision', type=str, required=False, choices=INPUT_OUTPUT_PRECISION_CHOICES,
+                      help='Optional. Specifies precision for all input layers of the model.')
+    prpr.add_argument('-op', '--output_precision', type=str, required=False, choices=INPUT_OUTPUT_PRECISION_CHOICES,
+                      help='Optional. Specifies precision for all output layers of the model.')
+    prpr.add_argument('-iop', '--input_output_precision', type=str, required=False,
+                      help='Optional. Specifies precision for input and output layers by name. Example: -iop "input:f16, output:f16". Notice that quotes are required. Overwrites precision from ip and op options for specified layers.')
+    prpr.add_argument('--mean_values', type=str, required=False, default='', metavar='[R,G,B]',
+                      help='Optional. Mean values to be used for the input image per channel. Values to be provided in the [R,G,B] format. Can be defined for '
+                           'desired input of the model, for example: "--mean_values data[255,255,255],info[255,255,255]". The exact meaning and order of '
+                           'channels depend on how the original model was trained. Applying the values affects performance and may cause type conversion')
+    prpr.add_argument('--scale_values', type=str, required=False, default='', metavar='[R,G,B]',
+                      help='Optional. Scale values to be used for the input image per channel. Values are provided in the [R,G,B] format. Can be defined for '
+                           'desired input of the model, for example: "--scale_values data[255,255,255],info[255,255,255]". The exact meaning and order of '
+                           'channels depend on how the original model was trained. If both --mean_values and --scale_values are specified, the mean is '
+                           'subtracted first and then scale is applied regardless of the order of options in command line. Applying the values affects '
+                           'performance and may cause type conversion')
+
+    devp = parser.add_argument_group('Device-specific performance options')
+    devp.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
+                      help='Number of threads to use for inference on the CPU '
+                           '(including HETERO and MULTI cases).')
+    devp.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, choices=['YES', 'NO', 'NUMA', 'HYBRID_AWARE'],
+                      help='Optional. Enable threads->cores (\'YES\' which is OpenVINO runtime\'s default for conventional CPUs), '
+                           'threads->(NUMA)nodes (\'NUMA\'), '
+                           'threads->appropriate core types (\'HYBRID_AWARE\', which is OpenVINO runtime\'s default for Hybrid CPUs) '
+                           'or completely disable (\'NO\') '
+                           'CPU threads pinning for CPU-involved inference.')
+
+    stat = parser.add_argument_group('Statistics dumping options')
+    stat.add_argument('-latency_percentile', '--latency_percentile', type=int, required=False, default=50,
+                      help='Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value is 50 (median).')
+    stat.add_argument('-report_type', '--report_type', type=str, required=False,
+                      choices=['no_counters', 'average_counters', 'detailed_counters'],
+                      help="Optional. Enable collecting statistics report. \"no_counters\" report contains "
+                           "configuration options specified, resulting FPS and latency. \"average_counters\" "
+                           "report extends \"no_counters\" report and additionally includes average PM "
+                           "counters values for each layer from the model. \"detailed_counters\" report "
+                           "extends \"average_counters\" report and additionally includes per-layer PM "
+                           "counters and latency for each executed infer request.")
+    stat.add_argument('-report_folder', '--report_folder', type=str, required=False, default='',
+                      help="Optional. Path to a folder where statistics report is stored.")
+    args.add_argument('-json_stats', '--json_stats', type=str2bool, required=False, default=False, nargs='?', const=True,
+                      help="Optional. Enables JSON-based statistics output (by default reporting system will use CSV format). Should be used together with -report_folder option.")
+    stat.add_argument('-pc', '--perf_counts', type=str2bool, required=False, default=False, nargs='?', const=True,
+                      help='Optional. Report performance counters.', )
+    stat.add_argument('-pcsort', '--perf_counts_sort', type=str, required=False, default="",
+                      choices=['no_sort', 'sort', 'simple_sort'],
+                      help='Optional. Report performance counters and analysis the sort hotpoint opts.'
+                           ' sort: Analysis opts time cost, print by hotpoint order'
+                           ' no_sort: Analysis opts time cost, print by normal order'
+                           ' simple_sort: Analysis opts time cost, only print EXECUTED opts by normal order', )
+    stat.add_argument('-pcseq', '--pcseq', type=str2bool, required=False, default=False, nargs='?', const=True,
+                      help='Optional. Report latencies for each shape in -data_shape sequence.', )
+    advs.add_argument('-exec_graph_path', '--exec_graph_path', type=str, required=False,
+                      help='Optional. Path to a file where to store executable graph information serialized.')
+    stat.add_argument('-dump_config', type=str, required=False, default='',
+                      help="Optional. Path to JSON file to dump OpenVINO parameters, which were set by application.")
+    stat.add_argument('-load_config', type=str, required=False, default='',
+                      help="Optional. Path to JSON file to load custom OpenVINO parameters.\n"
+                           "Please note, command line parameters have higher priority then parameters from configuration file.\n"
+                           "Example 1: a simple JSON file for HW device with primary properties.\n"
+                           "   {\n"
+                           "      \"CPU\": {\"NUM_STREAMS\": \"3\", \"PERF_COUNT\": \"NO\"}\n"
+                           "   }\n"
+                           "Example 2: a simple JSON file for meta device(AUTO/MULTI) with HW device properties.\n"
+                           "   {\n"
+                           "      \"AUTO\": {\n"
+                           "         \"PERFORMANCE_HINT\": \"THROUGHPUT\",\n"
+                           "         \"PERF_COUNT\": \"NO\",\n"
+                           "         \"DEVICE_PROPERTIES\": \"{CPU:{INFERENCE_PRECISION_HINT:f32,NUM_STREAMS:3},GPU:{INFERENCE_PRECISION_HINT:f32,NUM_STREAMS:5}}\"\n"
+                           "      }\n"
+                           "   }")
+    return parser
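As a quick orientation to the hunk above, parse_args() returns the configured ArgumentParser rather than parsed arguments. A minimal usage sketch, not part of the diff, assuming the vendored openvino package is importable under its usual openvino name (as the module's own import of openvino.tools.benchmark.utils.utils implies):

    from openvino.tools.benchmark.parameters import parse_args

    parser = parse_args()
    # -m is the only required option; everything else falls back to the defaults above.
    args = parser.parse_args(['-m', 'model.xml', '-d', 'NPU', '-niter', '10'])
    print(args.path_to_model, args.target_device, args.number_iterations)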
intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+NPU_DEVICE_NAME = 'NPU'
+CPU_DEVICE_NAME = 'CPU'
+GPU_DEVICE_NAME = 'GPU'
+HETERO_DEVICE_NAME = 'HETERO'
+MULTI_DEVICE_NAME = 'MULTI'
+AUTO_DEVICE_NAME = 'AUTO'
+UNKNOWN_DEVICE_TYPE = 'UNKNOWN'
+
+XML_EXTENSION = '.xml'
+BIN_EXTENSION = '.bin'
+BLOB_EXTENSION = '.blob'
+
+IMAGE_EXTENSIONS = ['.bmp', '.dib', '.jpeg', '.jpg', '.jpe', '.jp2', '.png',
+                    '.pbm', '.pgm', '.ppm', '.sr', '.ras', '.tiff', '.tif']
+NUMPY_EXTENSIONS = ['.npy']
+BINARY_EXTENSIONS = ['.bin']
+
+DEVICE_DURATION_IN_SECS = {
+    CPU_DEVICE_NAME: 60,
+    GPU_DEVICE_NAME: 60,
+    NPU_DEVICE_NAME: 60,
+    UNKNOWN_DEVICE_TYPE: 120
+}
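The constants above feed the benchmark defaults; DEVICE_DURATION_IN_SECS presumably supplies the per-device run time that backs the -niter help text ("calculated depending on a device"). A small hypothetical helper, not taken from the package, illustrating the lookup with UNKNOWN_DEVICE_TYPE as the fallback:

    from openvino.tools.benchmark.utils.constants import (
        DEVICE_DURATION_IN_SECS, UNKNOWN_DEVICE_TYPE)

    def default_duration_seconds(device_name: str) -> int:
        # Reduce e.g. 'MULTI:CPU,GPU' to its first concrete device before the lookup.
        base = device_name.split(':', 1)[-1].split(',')[0]
        return DEVICE_DURATION_IN_SECS.get(base, DEVICE_DURATION_IN_SECS[UNKNOWN_DEVICE_TYPE])

    print(default_duration_seconds('NPU'))     # 60
    print(default_duration_seconds('MYRIAD'))  # 120, the UNKNOWN_DEVICE_TYPE fallback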