bigdl_core_npu-2.5.0-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bigdl_core_npu-2.5.0.dist-info/METADATA +35 -0
- bigdl_core_npu-2.5.0.dist-info/RECORD +223 -0
- bigdl_core_npu-2.5.0.dist-info/WHEEL +5 -0
- bigdl_core_npu-2.5.0.dist-info/top_level.txt +1 -0
- intel_npu_acceleration_library/__init__.py +24 -0
- intel_npu_acceleration_library/_version.py +6 -0
- intel_npu_acceleration_library/backend/__init__.py +37 -0
- intel_npu_acceleration_library/backend/base.py +215 -0
- intel_npu_acceleration_library/backend/bindings.py +279 -0
- intel_npu_acceleration_library/backend/compression.py +24 -0
- intel_npu_acceleration_library/backend/convolution.py +58 -0
- intel_npu_acceleration_library/backend/factory.py +944 -0
- intel_npu_acceleration_library/backend/linear.py +60 -0
- intel_npu_acceleration_library/backend/matmul.py +59 -0
- intel_npu_acceleration_library/backend/mlp.py +58 -0
- intel_npu_acceleration_library/backend/ops.py +141 -0
- intel_npu_acceleration_library/backend/qlinear.py +71 -0
- intel_npu_acceleration_library/backend/qmatmul.py +66 -0
- intel_npu_acceleration_library/backend/runtime.py +210 -0
- intel_npu_acceleration_library/backend/sdpa.py +107 -0
- intel_npu_acceleration_library/backend/tensor.py +1050 -0
- intel_npu_acceleration_library/backend/utils.py +70 -0
- intel_npu_acceleration_library/compiler.py +194 -0
- intel_npu_acceleration_library/device.py +230 -0
- intel_npu_acceleration_library/dtypes.py +122 -0
- intel_npu_acceleration_library/external/openvino/__init__.py +71 -0
- intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +20 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
- intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +352 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +139 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +98 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +119 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +289 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +118 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +536 -0
- intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +256 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
- intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +460 -0
- intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
- intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
- intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
- intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +26 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +4 -0
- intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
- intel_npu_acceleration_library/external/openvino/properties/__init__.py +21 -0
- intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
- intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
- intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
- intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
- intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
- intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
- intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
- intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +18 -0
- intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3067 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +399 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +10 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +85 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +189 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +783 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
- intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
- intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +38 -0
- intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +429 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +70 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
- intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
- intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
- intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +536 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +35 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +246 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +205 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +109 -0
- intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
- intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
- intel_npu_acceleration_library/external/openvino/utils.py +98 -0
- intel_npu_acceleration_library/functional/__init__.py +8 -0
- intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
- intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
- intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
- intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
- intel_npu_acceleration_library/modelling.py +150 -0
- intel_npu_acceleration_library/nn/__init__.py +20 -0
- intel_npu_acceleration_library/nn/autograd.py +68 -0
- intel_npu_acceleration_library/nn/conv.py +257 -0
- intel_npu_acceleration_library/nn/functional.py +1207 -0
- intel_npu_acceleration_library/nn/linear.py +162 -0
- intel_npu_acceleration_library/nn/llm.py +417 -0
- intel_npu_acceleration_library/nn/module.py +393 -0
- intel_npu_acceleration_library/optimizations.py +157 -0
- intel_npu_acceleration_library/quantization.py +174 -0
intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py
@@ -0,0 +1,482 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import sys
+import re
+from typing import Dict, List
+import numpy as np
+from collections import defaultdict
+from pathlib import Path
+from importlib.util import find_spec
+
+from openvino.runtime import Tensor, PartialShape, Type
+from openvino.runtime.utils.types import get_dtype
+
+from .constants import IMAGE_EXTENSIONS, NUMPY_EXTENSIONS, BINARY_EXTENSIONS
+from .logging import logger
+from .utils import AppInputInfo
+
+if find_spec('cv2') is not None:
+    try:
+        import cv2
+    except ImportError as ex:
+        raise Exception("Failed to import opencv module. "
+                        "Please try to uninstall opencv-python "
+                        "and install opencv-python-headless instead.") from ex
+
+
+class DataQueue:
+    def __init__(self, input_data: dict, batch_sizes: list):
+        self.input_data = input_data
+        self.sizes_map = {}
+        for name, tensors in input_data.items():
+            self.sizes_map[name] = len(tensors)
+        self.index_map = defaultdict.fromkeys(input_data.keys(), 0)
+        self.batch_sizes = batch_sizes
+        self.size = len(batch_sizes)
+        self.current_group_id = 0
+
+    def get_next_input(self):
+        data = {}
+        for input_name, input_tensors in self.input_data.items():
+            data[input_name] = input_tensors[self.index_map[input_name]]
+            self.index_map[input_name] = (self.index_map[input_name] + 1) % self.sizes_map[input_name]
+        self.current_group_id = (self.current_group_id + 1) % self.size
+        return data
+
+    def get_next_batch_size(self):
+        return self.batch_sizes[self.current_group_id]
+
+
+def get_group_batch_sizes(app_input_info):
+    batch_sizes = []
+    niter = max(len(info.shapes) for info in app_input_info)
+    for i in range(niter):
+        batch_size = 0
+        for info in app_input_info:
+            batch_index = info.layout.get_index_by_name('N') if info.layout.has_name('N') else -1
+            if batch_index != -1:
+                shape = info.shapes[i % len(info.shapes)]
+                if batch_size == 0:
+                    batch_size = shape[batch_index]
+                elif batch_size != shape[batch_index]:
+                    raise Exception("Can't determine batch size: batch is different for different inputs!")
+        if batch_size == 0:
+            batch_size = 1
+        batch_sizes.append(batch_size)
+    return batch_sizes
+
+
+def get_batch_sizes_per_input_map(app_input_info: List[AppInputInfo]):
+    batch_sizes_map = {}
+    for info in app_input_info:
+        if info.layout.has_name('N'):
+            if info.is_dynamic:
+                batch_sizes_map[info.name] = info.getDimensionsByLayout('N')
+            else:
+                batch_sizes_map[info.name] = [len(info.getDimensionByLayout('N'))]
+        else:
+            batch_sizes_map[info.name] = [1] * len(info.shapes)
+    return batch_sizes_map
+
+def verify_objects_to_be_used(objects_to_be_used_map: Dict[str, List[str]], info: AppInputInfo, total_frames: int, input_type_name: str):
+    if objects_to_be_used_map[info.name] > total_frames and objects_to_be_used_map[info.name] % total_frames != 0:
+        objects_to_be_used_map[info.name] = objects_to_be_used_map[info.name] - objects_to_be_used_map[info.name] % total_frames
+        logger.warning(f"Number of provided {input_type_name} for input '{info.name}' is not a multiple of the number of "
+                       f"provided data shapes. Only {objects_to_be_used_map[info.name]} {input_type_name} will be processed for this input.")
+    elif objects_to_be_used_map[info.name] < total_frames:
+        logger.warning(f"Some {input_type_name} will be duplicated: {total_frames} is required, "
+                       f"but only {objects_to_be_used_map[info.name]} were provided.")
+
+def get_input_data(paths_to_input, app_input_info):
+    image_mapping, numpy_mapping, binary_mapping = get_input_file_mappings(paths_to_input, app_input_info)
+
+    image_sizes = get_image_sizes(app_input_info)
+    batch_sizes_map = get_batch_sizes_per_input_map(app_input_info)
+
+    images_to_be_used_map = {input_name: len(images)
+                             for input_name, images in image_mapping.items()}
+    numpys_to_be_used_map = {input_name: len(images)
+                             for input_name, images in numpy_mapping.items()}
+    binaries_to_be_used_map = {input_name: len(binaries)
+                               for input_name, binaries in binary_mapping.items()}
+
+    for info in app_input_info:
+        if info.shapes:
+            total_frames = np.sum(batch_sizes_map[info.name])
+            if info.name in image_mapping:
+                verify_objects_to_be_used(images_to_be_used_map, info, total_frames, "images")
+            elif info.name in numpy_mapping:
+                verify_objects_to_be_used(numpys_to_be_used_map, info, total_frames, "numpy arrays")
+            elif info.name in binary_mapping:
+                verify_objects_to_be_used(binaries_to_be_used_map, info, total_frames, "binaries")
+            else:
+                if not (info.is_image_info and len(image_sizes) == 1):
+                    logger.warning(f"No input files were given for input '{info.name}'! This input will be filled with random values!")
+        else:
+            if info.name in image_mapping:
+                logger.info(f"Images given for input '{info.name}' will be processed with original shapes.")
+            elif info.name in numpy_mapping:
+                logger.info(f"Numpy arrays given for input '{info.name}' will be processed with original shapes.")
+            else:
+                raise Exception(f"Input {info.name} is dynamic. Provide data shapes!")
+
+    data = {}
+    for port, info in enumerate(app_input_info):
+        if info.name in image_mapping:
+            data[port] = get_image_tensors(image_mapping[info.name][:images_to_be_used_map[info.name]], info, batch_sizes_map[info.name])
+
+        elif info.name in numpy_mapping:
+            data[port] = get_numpy_tensors(numpy_mapping[info.name][:numpys_to_be_used_map[info.name]], info, batch_sizes_map[info.name])
+
+        elif info.name in binary_mapping:
+            data[port] = get_binary_tensors(binary_mapping[info.name][:binaries_to_be_used_map[info.name]], info, batch_sizes_map[info.name])
+
+        elif info.is_image_info and len(image_sizes) == 1:
+            image_size = image_sizes[0]
+            logger.info(f"Create input tensors for input '{info.name}' with image sizes: {image_size}")
+            data[port] = get_image_info_tensors(image_size, info)
+        else:
+            logger.info(f"Fill input '{info.name}' with random values ")
+            data[port] = fill_tensors_with_random(info)
+
+    return DataQueue(data, get_group_batch_sizes(app_input_info))
+
+
+def get_image_tensors(image_paths: List[str], info: AppInputInfo, batch_sizes: List[int]) -> List[Tensor]:
+    if 'cv2' not in sys.modules:
+        logger.error("Loading images requires the opencv-python or opencv-python-headless package. "
+                     "Please install it before continuing or run benchmark without "
+                     "the -i flag to fill vectors with random data.")
+
+    num_shapes = len(info.shapes)
+    num_images = len(image_paths)
+
+    processed_frames = 0
+    widths = info.widths if info.is_dynamic else [info.width]
+    heights = info.heights if info.is_dynamic else [info.height]
+    process_with_original_shapes = num_shapes == 0
+    tensors = []
+
+    niter = max(num_shapes, num_images)
+    for i in range(niter):
+        shape = list(info.shapes[i % num_shapes]) if num_shapes else []
+        dtype = get_dtype(info.element_type)
+        images = np.ndarray(shape=shape, dtype=dtype)
+        image_index = processed_frames
+        current_batch_size = 1 if process_with_original_shapes else batch_sizes[i % num_shapes]
+        for b in range(current_batch_size):
+            image_index %= num_images
+            image_filename = image_paths[image_index]
+            logger.info(f'Prepare image {image_filename}')
+            image = cv2.imread(image_filename)
+            if process_with_original_shapes:
+                logger.info(f'Image will be processed with original shape - {image.shape[:-1]}')
+            elif info.layout.has_name('H') and info.layout.has_name('W'):
+                new_im_size = (widths[i % num_shapes], heights[i % num_shapes])
+                if image.shape[:-1] != new_im_size:
+                    logger.warning(f"Image is resized from ({image.shape[:-1]}) to ({new_im_size})")
+                    image = cv2.resize(image, new_im_size)
+
+            model_channel = int(str(info.channels))
+            image_channel = image.shape[-1]
+            if model_channel == 1 and image_channel == 3:
+                image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+            if model_channel == image_channel and str(info.layout) in ['[N,C,H,W]', '[C,H,W]']:
+                image = image.transpose((2, 0, 1))
+
+            if process_with_original_shapes:
+                if len(info.partial_shape) == 4:
+                    image = np.expand_dims(image, 0)
+                p_shape = PartialShape(image.shape)
+                if info.partial_shape.compatible(p_shape):
+                    info.data_shapes.append(p_shape.to_shape())
+                else:
+                    raise Exception(f"Data shape '{str(p_shape)}' provided for input '{info.name}' "
+                                    f"is not compatible with partial shape '{str(info.partial_shape)}' for this input.")
+                tensors.append(Tensor(image.astype(dtype)))
+            else:
+                try:
+                    if 3 == images[b].ndim and 1 == images[b].shape[2] and 2 == image.ndim:
+                        # The model last dim has length 1, which means it takes greyscale images.
+                        # Extend input image dims to match it
+                        images[b] = image[:, :, None]
+                    else:
+                        images[b] = image
+                except ValueError:
+                    raise Exception(f"Image shape {image.shape} is not compatible with input shape {shape}! "
+                                    f"Make sure -i parameter is valid.")
+            image_index += 1
+        processed_frames += current_batch_size
+        if not process_with_original_shapes:
+            tensors.append(Tensor(images))
+    return tensors
+
+
+def get_numpy_tensors(numpy_paths: List[str], info: AppInputInfo, batch_sizes: List[int]) -> List[Tensor]:
+
+    num_shapes = len(info.shapes)
+    num_arrays = len(numpy_paths)
+
+    processed_frames = 0
+    process_with_original_shapes = num_shapes == 0
+    tensors = []
+
+    niter = max(num_shapes, num_arrays)
+    for i in range(niter):
+        shape = list(info.shapes[i % num_shapes]) if num_shapes else []
+        dtype = get_dtype(info.element_type)
+        numpy_arrays = np.ndarray(shape=shape, dtype=dtype)
+        numpy_index = processed_frames
+
+        current_batch_size = 1 if process_with_original_shapes \
+            else batch_sizes[i % num_shapes]
+
+        for b in range(current_batch_size):
+            numpy_index %= num_arrays
+            numpy_filename: str = numpy_paths[numpy_index]
+            extension = numpy_filename.lower().split('.')[-1]
+            if extension == "npy":
+                numpy_arr: np.ndarray = np.load(numpy_filename)
+
+                if list(numpy_arr.shape) != shape and not process_with_original_shapes:
+                    raise Exception(
+                        f"Numpy array shape mismatch. File {numpy_filename} "
+                        f"has shape: {numpy_arr.shape}, expected: {shape}")
+
+                if numpy_arr.dtype != dtype:
+                    raise Exception(
+                        f"Numpy array in file {numpy_filename} is of "
+                        f"{numpy_arr.dtype} format, which does not match "
+                        f"input type {dtype}.")
+
+                if process_with_original_shapes:
+                    if len(info.partial_shape) - 1 == len(numpy_arr.shape):
+                        numpy_arr = np.expand_dims(numpy_arr, 0)
+
+                    p_shape = PartialShape(numpy_arr.shape)
+                    if info.partial_shape.compatible(p_shape):
+                        info.data_shapes.append(p_shape.to_shape())
+                    else:
+                        raise Exception(f"Data shape '{str(p_shape)}' provided for input '{info.name}' "
+                                        f"is not compatible with partial shape '{str(info.partial_shape)}' for this input.")
+                    tensors.append(Tensor(numpy_arr))
+                else:
+                    try:
+                        if info.layout.has_name("N"):
+                            numpy_arrays[[None] * info.layout.get_index_by_name("N") + [b]] = numpy_arr
+                        else:
+                            numpy_arrays = numpy_arr
+                    except ValueError:
+                        raise Exception(f"Numpy array shape {numpy_arr.shape} is not compatible with input shape {shape}! "
+                                        f"Make sure -i parameter is valid.")
+            else:
+                raise Exception(
+                    f"Unsupported numpy file type: {extension}")
+            numpy_index += 1
+
+        processed_frames += current_batch_size
+        if not process_with_original_shapes:
+            tensors.append(Tensor(numpy_arrays))
+
+    return tensors
+
+def get_binary_tensors(binary_paths: List[str], info: AppInputInfo, batch_sizes: List[int]) -> List[Tensor]:
+    num_shapes = len(info.shapes)
+    num_binaries = len(binary_paths)
+    niter = max(num_shapes, num_binaries)
+    processed_frames = 0
+    tensors = []
+    for i in range(niter):
+        shape_id = i % num_shapes
+        dtype = get_dtype(info.element_type)
+        shape = list(info.shapes[shape_id])
+        binaries = np.ndarray(shape=shape, dtype=dtype)
+        binary_index = processed_frames
+        current_batch_size = batch_sizes[shape_id]
+        for b in range(current_batch_size):
+            binary_index %= num_binaries
+            binary_filename: str = binary_paths[binary_index]
+            extension = binary_filename.lower().split('.')[-1]
+            if extension == "bin":
+                binary_file_size = os.path.getsize(binary_filename)
+                blob_size = dtype.itemsize * int(np.prod(shape))
+                if blob_size != binary_file_size:
+                    raise Exception(
+                        f"File {binary_filename} contains {binary_file_size} bytes but model expects {blob_size}")
+                from_file = np.reshape(np.fromfile(binary_filename, dtype), shape)
+                if info.layout.has_name("N"):
+                    binaries[[None] * info.layout.get_index_by_name("N") + [b]] = from_file
+                else:
+                    binaries = from_file
+            else:
+                raise Exception(
+                    f"Unsupported binary file type: {extension}")
+
+            binary_index += 1
+        processed_frames += current_batch_size
+        tensors.append(Tensor(binaries))
+    return tensors
+
+
+def get_image_sizes(app_input_info):
+    image_sizes = []
+    for info in app_input_info:
+        if info.is_image:
+            if info.is_static:
+                image_sizes.append((info.width, info.height))
+            else:
+                info_image_sizes = []
+                for w, h in zip(info.widths, info.heights):
+                    info_image_sizes.append((w, h))
+                image_sizes.append(info_image_sizes)
+    return image_sizes
+
+
+def get_image_info_tensors(image_sizes, layer):
+    im_infos = []
+    for shape, image_size in zip(layer.shapes, image_sizes):
+        im_info = np.ndarray(shape, dtype=get_dtype(layer.element_type))
+        for b in range(shape[0]):
+            for i in range(shape[1]):
+                im_info[b][i] = image_size if i in [0, 1] else 1
+        im_infos.append(Tensor(im_info))
+    return im_infos
+
+def get_random_4bit_tensor(shape, element_type, rs):
+    pack_shape = [x for x in shape]
+    pack_shape[-1] = pack_shape[-1]*element_type.bitwidth
+    rand_data = (rs.uniform(0, 15, list(pack_shape)) >= 7).astype(int).flatten()
+    rr = np.packbits(rand_data)
+    return Tensor(rr, shape, element_type)
+
+def fill_tensors_with_random(layer):
+    is_4bit = layer.element_type.bitwidth == 4
+    dtype = np.uint8 if is_4bit else get_dtype(layer.element_type)
+    rand_min, rand_max = (0, 1) if dtype == bool else (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)
+    # np.random.uniform excludes high: add 1 to have it generated
+    if np.dtype(dtype).kind in ['i', 'u', 'b']:
+        rand_max += 1
+    rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(0)))
+    input_tensors = []
+    for shape in layer.shapes:
+        if shape:
+            if is_4bit:
+                ov_tensor = get_random_4bit_tensor(shape, layer.element_type, rs)
+            else:
+                ov_tensor = Tensor(rs.uniform(rand_min, rand_max, list(shape)).astype(dtype))
+        else:
+            if is_4bit:
+                ov_tensor = get_random_4bit_tensor([1], layer.element_type, rs)
+            else:
+                ov_tensor = Tensor(np.ndarray([], dtype, np.array(rs.uniform(rand_min, rand_max)).astype(dtype)))
+        input_tensors.append(ov_tensor)
+    return input_tensors
+
+
+def get_input_file_mappings(paths_to_inputs, app_input_info):
+    image_dicts_list = []
+    numpy_dicts_list = []
+    binary_dicts_list = []
+
+    for path in paths_to_inputs:
+        image_dict, numpy_dict, binary_dict = parse_path(path, app_input_info)
+        image_dicts_list.append(image_dict)
+        numpy_dicts_list.append(numpy_dict)
+        binary_dicts_list.append(binary_dict)
+
+    def merge_dicts(dicts_list):
+        merged = defaultdict(list)
+        for dict in dicts_list:
+            for k, v in dict.items():
+                merged[k] += v
+        return merged
+
+    def remove_empty_items(dict):
+        return {k: sorted(v) for k, v in dict.items() if v}
+
+    return remove_empty_items(merge_dicts(image_dicts_list)), \
+        remove_empty_items(merge_dicts(numpy_dicts_list)), \
+        remove_empty_items(merge_dicts(binary_dicts_list))
+
+
+def parse_path(path, app_input_info):
+    """
+    Parse "input_1:file1/dir1,file2/dir2,input_2:file3/dir3 or file1/dir1,file2/dir2" into three dicts,
+    each containing input_name (str) as key and list of strings of binary/numpy/image filepaths as values.
+    """
+    input_names = list(info.name for info in app_input_info)
+    input_node_names = list(info.node_name for info in app_input_info)
+    parsed_names = re.findall(r"((?=[^,])(?![a-zA-Z]:\\)[\w\.]+):", path)
+    wrong_names = list(name for name in parsed_names if name not in input_names + input_node_names)
+    if wrong_names:
+        raise Exception(
+            f"Wrong input mapping! Cannot find inputs: {wrong_names}. "
+            f"Available inputs: {input_names}. "
+            "Please check `-i` input data"
+        )
+    tensor_names = [parsed_name if parsed_name in input_names else input_names[input_node_names.index(parsed_name)] for parsed_name in parsed_names]
+    input_pathes = [path for path in re.split(r"(?=[^,])(?![a-zA-Z]:\\)[\w\.]+:", path) if path]
+    input_path_mapping = defaultdict(list)
+    # input mapping is used
+    if tensor_names:
+        input_path_mapping = {input_: files.strip(",").split(",") for input_, files in zip(tensor_names, input_pathes)}
+    else:
+        input_files = list()
+        _input_pathes = input_pathes[0].strip(",").split(",")
+        for _input_path in _input_pathes:
+            input_path = Path(_input_path)
+            if input_path.exists():
+                if input_path.is_dir():
+                    input_files += list(str(file_path) for file_path in input_path.iterdir())
+                elif input_path.is_file():
+                    input_files.append(str(input_path))
+            else:
+                raise Exception(f"Path '{str(input_path)}' doesn't exist \n {str(input_path)}")
+        num_files, num_inputs = len(input_files), len(app_input_info)
+        if num_inputs > 1:
+            logger.warning(f"Model has {num_inputs} inputs. It's recommended to use name mapping to specify parameters for each input.")
+        if num_files > num_inputs and num_files % num_inputs != 0:
+            input_files = input_files[:num_files - num_files % num_inputs]
+            logger.warning(f"Number of provided input files '{num_files}' is not a multiple of the number of "
+                           f"model inputs. Only {len(input_files)} files will be used.")
+            num_files = len(input_files)
+        inputs_to_fill = list(info.name for info in app_input_info if not info.is_image_info)
+        for i in range(num_files):
+            input_path_mapping[inputs_to_fill[i % len(inputs_to_fill)]].append(input_files[i])
+
+    images_mapping = defaultdict(list)
+    numpy_mapping = defaultdict(list)
+    binary_mapping = defaultdict(list)
+
+    unsupported_files = list()
+    for input_name, _input_pathes in input_path_mapping.items():
+        for _input_path in _input_pathes:
+            input_path = Path(_input_path)
+            if input_path.exists():
+                files = list()
+                if input_path.is_dir():
+                    files = input_path.iterdir()
+                elif input_path.is_file():
+                    files = [input_path]
+                for file in files:
+                    if file.suffix.lower() in IMAGE_EXTENSIONS:
+                        images_mapping[input_name].append(str(file))
+                    elif file.suffix.lower() in NUMPY_EXTENSIONS:
+                        numpy_mapping[input_name].append(str(file))
+                    elif file.suffix.lower() in BINARY_EXTENSIONS:
+                        binary_mapping[input_name].append(str(file))
+                    else:
+                        unsupported_files.append(str(file))
            else:
+                raise Exception(f"Path for input '{input_name}' doesn't exist \n {str(input_path)}")
+    if unsupported_files:
+        logger.warning(f"These files have unsupported extensions and will "
+                       f"be ignored: {unsupported_files}.\n"
+                       f"Supported extensions:\n"
+                       f"Images: {IMAGE_EXTENSIONS}\n"
+                       f"Binary: {BINARY_EXTENSIONS}\n"
+                       f"Numpy: {NUMPY_EXTENSIONS}")
+    return images_mapping, numpy_mapping, binary_mapping
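
As a usage illustration (not part of the package diff), the sketch below shows how the DataQueue defined in inputs_filling.py above cycles prepared per-input data round-robin. The import path and the use of plain NumPy arrays in place of openvino.runtime.Tensor objects are assumptions made only to keep the example small and runnable; in this wheel the module is vendored under intel_npu_acceleration_library/external/.

import numpy as np

# Assumption: the benchmark utilities shown above are importable from an OpenVINO
# installation that ships benchmark_app; adjust the path if using the vendored copy.
from openvino.tools.benchmark.utils.inputs_filling import DataQueue

# Two model inputs with a different number of prepared arrays each
# (plain NumPy arrays stand in for openvino.runtime.Tensor objects here).
input_data = {
    "image": [np.zeros((1, 3, 224, 224)), np.ones((1, 3, 224, 224))],
    "scale": [np.array([1.0])],
}
queue = DataQueue(input_data, batch_sizes=[1, 1])

for _ in range(3):
    batch = queue.get_next_input()  # one entry per input; indices advance round-robin
    print({name: arr.mean() for name, arr in batch.items()},
          "next batch size:", queue.get_next_batch_size())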