bigdl-core-npu 2.6.0b20250114__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (234)
  1. bigdl-core-npu/__init__.py +0 -0
  2. bigdl-core-npu/include/common.h +96 -0
  3. bigdl-core-npu/include/npu_llm.h +74 -0
  4. bigdl-core-npu/npu_llm.dll +0 -0
  5. bigdl-core-npu/npu_llm.lib +0 -0
  6. bigdl_core_npu-2.6.0b20250114.dist-info/METADATA +44 -0
  7. bigdl_core_npu-2.6.0b20250114.dist-info/RECORD +234 -0
  8. bigdl_core_npu-2.6.0b20250114.dist-info/WHEEL +5 -0
  9. bigdl_core_npu-2.6.0b20250114.dist-info/top_level.txt +2 -0
  10. intel_npu_acceleration_library/__init__.py +24 -0
  11. intel_npu_acceleration_library/_version.py +6 -0
  12. intel_npu_acceleration_library/backend/__init__.py +37 -0
  13. intel_npu_acceleration_library/backend/base.py +250 -0
  14. intel_npu_acceleration_library/backend/bindings.py +383 -0
  15. intel_npu_acceleration_library/backend/compression.py +24 -0
  16. intel_npu_acceleration_library/backend/convolution.py +58 -0
  17. intel_npu_acceleration_library/backend/factory.py +1161 -0
  18. intel_npu_acceleration_library/backend/linear.py +60 -0
  19. intel_npu_acceleration_library/backend/matmul.py +59 -0
  20. intel_npu_acceleration_library/backend/mlp.py +58 -0
  21. intel_npu_acceleration_library/backend/ops.py +142 -0
  22. intel_npu_acceleration_library/backend/qlinear.py +75 -0
  23. intel_npu_acceleration_library/backend/qmatmul.py +66 -0
  24. intel_npu_acceleration_library/backend/runtime.py +215 -0
  25. intel_npu_acceleration_library/backend/sdpa.py +107 -0
  26. intel_npu_acceleration_library/backend/tensor.py +1120 -0
  27. intel_npu_acceleration_library/backend/utils.py +70 -0
  28. intel_npu_acceleration_library/compiler.py +194 -0
  29. intel_npu_acceleration_library/device.py +230 -0
  30. intel_npu_acceleration_library/dtypes.py +155 -0
  31. intel_npu_acceleration_library/external/openvino/__init__.py +72 -0
  32. intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +21 -0
  33. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
  34. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
  35. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
  36. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
  37. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
  38. intel_npu_acceleration_library/external/openvino/experimental/__init__.py +14 -0
  39. intel_npu_acceleration_library/external/openvino/frontend/__init__.py +34 -0
  40. intel_npu_acceleration_library/external/openvino/frontend/frontend.py +44 -0
  41. intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
  42. intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +293 -0
  43. intel_npu_acceleration_library/external/openvino/frontend/jax/passes.py +65 -0
  44. intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +182 -0
  45. intel_npu_acceleration_library/external/openvino/frontend/onnx/__init__.py +15 -0
  46. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
  47. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
  48. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
  49. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
  50. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
  51. intel_npu_acceleration_library/external/openvino/frontend/paddle/__init__.py +15 -0
  52. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
  53. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
  54. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
  55. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
  56. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
  57. intel_npu_acceleration_library/external/openvino/frontend/pytorch/__init__.py +19 -0
  58. intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +370 -0
  59. intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +180 -0
  60. intel_npu_acceleration_library/external/openvino/frontend/pytorch/module_extension.py +39 -0
  61. intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +118 -0
  62. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
  63. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
  64. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
  65. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
  66. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
  67. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +131 -0
  68. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend_utils.py +85 -0
  69. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/compile.py +141 -0
  70. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/decompositions.py +116 -0
  71. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/execute.py +189 -0
  72. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +290 -0
  73. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +126 -0
  74. intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +568 -0
  75. intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +258 -0
  76. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/__init__.py +16 -0
  77. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/graph_iterator.py +116 -0
  78. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/node_decoder.py +219 -0
  79. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
  80. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
  81. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
  82. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
  83. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
  84. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +481 -0
  85. intel_npu_acceleration_library/external/openvino/helpers/__init__.py +6 -0
  86. intel_npu_acceleration_library/external/openvino/helpers/packing.py +87 -0
  87. intel_npu_acceleration_library/external/openvino/preprocess/README.md +60 -0
  88. intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +28 -0
  89. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/__init__.py +15 -0
  90. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/preprocess_converter.py +47 -0
  91. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +5 -0
  92. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/torchvision_preprocessing.py +347 -0
  93. intel_npu_acceleration_library/external/openvino/properties/__init__.py +22 -0
  94. intel_npu_acceleration_library/external/openvino/properties/_properties.py +55 -0
  95. intel_npu_acceleration_library/external/openvino/properties/device/__init__.py +14 -0
  96. intel_npu_acceleration_library/external/openvino/properties/hint/__init__.py +15 -0
  97. intel_npu_acceleration_library/external/openvino/properties/intel_auto/__init__.py +12 -0
  98. intel_npu_acceleration_library/external/openvino/properties/intel_cpu/__init__.py +8 -0
  99. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/__init__.py +12 -0
  100. intel_npu_acceleration_library/external/openvino/properties/intel_gpu/hint/__init__.py +11 -0
  101. intel_npu_acceleration_library/external/openvino/properties/log/__init__.py +11 -0
  102. intel_npu_acceleration_library/external/openvino/properties/streams/__init__.py +11 -0
  103. intel_npu_acceleration_library/external/openvino/runtime/__init__.py +85 -0
  104. intel_npu_acceleration_library/external/openvino/runtime/exceptions.py +17 -0
  105. intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +631 -0
  106. intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +19 -0
  107. intel_npu_acceleration_library/external/openvino/runtime/op/util/__init__.py +22 -0
  108. intel_npu_acceleration_library/external/openvino/runtime/opset1/__init__.py +112 -0
  109. intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +3068 -0
  110. intel_npu_acceleration_library/external/openvino/runtime/opset10/__init__.py +179 -0
  111. intel_npu_acceleration_library/external/openvino/runtime/opset10/ops.py +173 -0
  112. intel_npu_acceleration_library/external/openvino/runtime/opset11/__init__.py +179 -0
  113. intel_npu_acceleration_library/external/openvino/runtime/opset11/ops.py +107 -0
  114. intel_npu_acceleration_library/external/openvino/runtime/opset12/__init__.py +180 -0
  115. intel_npu_acceleration_library/external/openvino/runtime/opset12/ops.py +120 -0
  116. intel_npu_acceleration_library/external/openvino/runtime/opset13/__init__.py +188 -0
  117. intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +398 -0
  118. intel_npu_acceleration_library/external/openvino/runtime/opset14/__init__.py +190 -0
  119. intel_npu_acceleration_library/external/openvino/runtime/opset14/ops.py +171 -0
  120. intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +17 -0
  121. intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +276 -0
  122. intel_npu_acceleration_library/external/openvino/runtime/opset2/__init__.py +118 -0
  123. intel_npu_acceleration_library/external/openvino/runtime/opset2/ops.py +216 -0
  124. intel_npu_acceleration_library/external/openvino/runtime/opset3/__init__.py +134 -0
  125. intel_npu_acceleration_library/external/openvino/runtime/opset3/ops.py +638 -0
  126. intel_npu_acceleration_library/external/openvino/runtime/opset4/__init__.py +145 -0
  127. intel_npu_acceleration_library/external/openvino/runtime/opset4/ops.py +464 -0
  128. intel_npu_acceleration_library/external/openvino/runtime/opset5/__init__.py +152 -0
  129. intel_npu_acceleration_library/external/openvino/runtime/opset5/ops.py +372 -0
  130. intel_npu_acceleration_library/external/openvino/runtime/opset6/__init__.py +154 -0
  131. intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +215 -0
  132. intel_npu_acceleration_library/external/openvino/runtime/opset7/__init__.py +158 -0
  133. intel_npu_acceleration_library/external/openvino/runtime/opset7/ops.py +169 -0
  134. intel_npu_acceleration_library/external/openvino/runtime/opset8/__init__.py +169 -0
  135. intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +787 -0
  136. intel_npu_acceleration_library/external/openvino/runtime/opset9/__init__.py +175 -0
  137. intel_npu_acceleration_library/external/openvino/runtime/opset9/ops.py +341 -0
  138. intel_npu_acceleration_library/external/openvino/runtime/opset_utils.py +22 -0
  139. intel_npu_acceleration_library/external/openvino/runtime/passes/__init__.py +19 -0
  140. intel_npu_acceleration_library/external/openvino/runtime/passes/graph_rewrite.py +33 -0
  141. intel_npu_acceleration_library/external/openvino/runtime/passes/manager.py +26 -0
  142. intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +40 -0
  143. intel_npu_acceleration_library/external/openvino/runtime/properties/hint/__init__.py +25 -0
  144. intel_npu_acceleration_library/external/openvino/runtime/utils/__init__.py +7 -0
  145. intel_npu_acceleration_library/external/openvino/runtime/utils/broadcasting.py +44 -0
  146. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/__init__.py +8 -0
  147. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +447 -0
  148. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/wrappers.py +148 -0
  149. intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +156 -0
  150. intel_npu_acceleration_library/external/openvino/runtime/utils/input_validation.py +133 -0
  151. intel_npu_acceleration_library/external/openvino/runtime/utils/node_factory.py +127 -0
  152. intel_npu_acceleration_library/external/openvino/runtime/utils/reduction.py +25 -0
  153. intel_npu_acceleration_library/external/openvino/runtime/utils/types.py +175 -0
  154. intel_npu_acceleration_library/external/openvino/tools/__init__.py +4 -0
  155. intel_npu_acceleration_library/external/openvino/tools/benchmark/__init__.py +3 -0
  156. intel_npu_acceleration_library/external/openvino/tools/benchmark/benchmark.py +186 -0
  157. intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py +695 -0
  158. intel_npu_acceleration_library/external/openvino/tools/benchmark/parameters.py +199 -0
  159. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/__init__.py +3 -0
  160. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/constants.py +26 -0
  161. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +482 -0
  162. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/logging.py +8 -0
  163. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/statistics_report.py +296 -0
  164. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/utils.py +836 -0
  165. intel_npu_acceleration_library/external/openvino/tools/ovc/__init__.py +20 -0
  166. intel_npu_acceleration_library/external/openvino/tools/ovc/__main__.py +10 -0
  167. intel_npu_acceleration_library/external/openvino/tools/ovc/cli_parser.py +633 -0
  168. intel_npu_acceleration_library/external/openvino/tools/ovc/convert.py +102 -0
  169. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_data_type.py +82 -0
  170. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +550 -0
  171. intel_npu_acceleration_library/external/openvino/tools/ovc/environment_setup_utils.py +50 -0
  172. intel_npu_acceleration_library/external/openvino/tools/ovc/error.py +49 -0
  173. intel_npu_acceleration_library/external/openvino/tools/ovc/get_ov_update_message.py +16 -0
  174. intel_npu_acceleration_library/external/openvino/tools/ovc/help.py +45 -0
  175. intel_npu_acceleration_library/external/openvino/tools/ovc/logger.py +91 -0
  176. intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +40 -0
  177. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/__init__.py +2 -0
  178. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/analysis.py +46 -0
  179. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/check_config.py +57 -0
  180. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/extractor.py +447 -0
  181. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
  182. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/layout_utils.py +73 -0
  183. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +32 -0
  184. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/offline_transformations.py +107 -0
  185. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +83 -0
  186. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +298 -0
  187. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/preprocessing.py +220 -0
  188. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +214 -0
  189. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/shape_utils.py +109 -0
  190. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/type_utils.py +82 -0
  191. intel_npu_acceleration_library/external/openvino/tools/ovc/ovc.py +13 -0
  192. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_params.py +6 -0
  193. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_stub.py +28 -0
  194. intel_npu_acceleration_library/external/openvino/tools/ovc/telemetry_utils.py +118 -0
  195. intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +196 -0
  196. intel_npu_acceleration_library/external/openvino/tools/ovc/version.py +80 -0
  197. intel_npu_acceleration_library/external/openvino/torch/__init__.py +5 -0
  198. intel_npu_acceleration_library/external/openvino/utils.py +115 -0
  199. intel_npu_acceleration_library/functional/__init__.py +8 -0
  200. intel_npu_acceleration_library/functional/scaled_dot_product_attention.py +47 -0
  201. intel_npu_acceleration_library/lib/Release/cache.json +113732 -0
  202. intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
  203. intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
  204. intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
  205. intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
  206. intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
  207. intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
  208. intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
  209. intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
  210. intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
  211. intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
  212. intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
  213. intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
  214. intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
  215. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
  216. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
  217. intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
  218. intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
  219. intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
  220. intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
  221. intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
  222. intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
  223. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
  224. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
  225. intel_npu_acceleration_library/modelling.py +150 -0
  226. intel_npu_acceleration_library/nn/__init__.py +20 -0
  227. intel_npu_acceleration_library/nn/autograd.py +68 -0
  228. intel_npu_acceleration_library/nn/conv.py +257 -0
  229. intel_npu_acceleration_library/nn/functional.py +1207 -0
  230. intel_npu_acceleration_library/nn/linear.py +162 -0
  231. intel_npu_acceleration_library/nn/llm.py +417 -0
  232. intel_npu_acceleration_library/nn/module.py +393 -0
  233. intel_npu_acceleration_library/optimizations.py +157 -0
  234. intel_npu_acceleration_library/quantization.py +174 -0
intel_npu_acceleration_library/external/openvino/tools/benchmark/main.py
@@ -0,0 +1,695 @@
+ # Copyright (C) 2018-2024 Intel Corporation
+ # SPDX-License-Identifier: Apache-2.0
+
+ import os
+ import sys
+ from datetime import datetime
+
+ from openvino.runtime import Dimension, properties
+
+ from openvino.tools.benchmark.benchmark import Benchmark
+ from openvino.tools.benchmark.parameters import parse_args
+ from openvino.tools.benchmark.utils.constants import MULTI_DEVICE_NAME, \
+     CPU_DEVICE_NAME, GPU_DEVICE_NAME, \
+     BLOB_EXTENSION, AUTO_DEVICE_NAME
+ from openvino.tools.benchmark.utils.inputs_filling import get_input_data
+ from openvino.tools.benchmark.utils.logging import logger
+ from openvino.tools.benchmark.utils.utils import next_step, get_number_iterations, pre_post_processing, \
+     process_help_inference_string, print_perf_counters, print_perf_counters_sort, dump_exec_graph, get_duration_in_milliseconds, \
+     get_command_line_arguments, parse_value_per_device, parse_devices, get_inputs_info, \
+     print_inputs_and_outputs_info, get_network_batch_size, load_config, dump_config, get_latency_groups, \
+     check_for_static, can_measure_as_static, parse_value_for_virtual_device, is_virtual_device, is_virtual_device_found
+ from openvino.tools.benchmark.utils.statistics_report import StatisticsReport, JsonStatisticsReport, CsvStatisticsReport, \
+     averageCntReport, detailedCntReport
+
+ def parse_and_check_command_line():
+     def arg_not_empty(arg_value, empty_value):
+         return arg_value is not None and arg_value != empty_value
+
+     parser = parse_args()
+     args = parser.parse_args()
+
+     if args.latency_percentile < 1 or args.latency_percentile > 100:
+         parser.print_help()
+         raise RuntimeError("The percentile value is incorrect. The applicable values range is [1, 100].")
+
+     if args.perf_hint != "none" and (arg_not_empty(args.number_streams, "") or arg_not_empty(args.number_threads, 0) or arg_not_empty(args.infer_threads_pinning, "")):
+         raise Exception("-nstreams, -nthreads and -pin options are fine-tuning options. To use them you " \
+                         "should explicitly set the -hint option to none. This is not an OpenVINO limitation " \
+                         "(those options can be used together in OpenVINO), but a benchmark_app UI rule.")
+
+     if args.report_type == "average_counters" and MULTI_DEVICE_NAME in args.target_device:
+         raise Exception("Only the detailed_counters report type is supported for the MULTI device")
+
+     _, ext = os.path.splitext(args.path_to_model)
+     is_network_compiled = ext == BLOB_EXTENSION
+     is_precision_set = not (args.input_precision == "" and args.output_precision == "" and args.input_output_precision == "")
+
+     if is_network_compiled and is_precision_set:
+         raise Exception("Cannot set precision for a compiled model. " \
+                         "Please re-compile your model with the required precision.")
+
+     return args, is_network_compiled
+
+ def main():
+     statistics = None
+     try:
+         # ------------------------------ 1. Parsing and validating input arguments ------------------------------
+         next_step()
+         logger.info("Parsing input parameters")
+         args, is_network_compiled = parse_and_check_command_line()
+
+         command_line_arguments = get_command_line_arguments(sys.argv)
+         if args.report_type:
+             _statistics_class = JsonStatisticsReport if args.json_stats else CsvStatisticsReport
+             statistics = _statistics_class(StatisticsReport.Config(args.report_type, args.report_folder))
+             statistics.add_parameters(StatisticsReport.Category.COMMAND_LINE_PARAMETERS, command_line_arguments)
+
+         def is_flag_set_in_command_line(flag):
+             return any(x.strip('-') == flag for x, y in command_line_arguments)
+
+         device_name = args.target_device
+
+         devices = parse_devices(device_name)
+         device_number_streams = parse_value_per_device(devices, args.number_streams, "nstreams")
+         device_infer_precision = parse_value_per_device(devices, args.infer_precision, "infer_precision")
+
+         config = {}
+         if args.load_config:
+             load_config(args.load_config, config)
+
+         if is_network_compiled:
+             logger.info("Model is compiled")
+
+         # ------------------------------ 2. Loading OpenVINO Runtime -------------------------------------------
+         next_step(step_id=2)
+
+         benchmark = Benchmark(args.target_device, args.number_infer_requests,
+                               args.number_iterations, args.time, args.api_type, args.inference_only)
+
+         if args.extensions:
+             benchmark.add_extension(path_to_extensions=args.extensions)
+
+         ## GPU (clDNN) Extensions
+         if GPU_DEVICE_NAME in device_name and args.path_to_cldnn_config:
+             if GPU_DEVICE_NAME not in config.keys():
+                 config[GPU_DEVICE_NAME] = {}
+             config[GPU_DEVICE_NAME]['CONFIG_FILE'] = args.path_to_cldnn_config
+
+         if GPU_DEVICE_NAME in config.keys() and 'CONFIG_FILE' in config[GPU_DEVICE_NAME].keys():
+             cldnn_config = config[GPU_DEVICE_NAME]['CONFIG_FILE']
+             benchmark.add_extension(path_to_cldnn_config=cldnn_config)
+
+         benchmark.print_version_info()
+
+         # --------------------- 3. Setting device configuration --------------------------------------------------------
+         next_step()
+
+         def set_performance_hint(device):
+             perf_hint = properties.hint.PerformanceMode.THROUGHPUT
+             supported_properties = benchmark.core.get_property(device, properties.supported_properties())
+             if properties.hint.performance_mode() in supported_properties:
+                 if is_flag_set_in_command_line('hint'):
+                     if args.perf_hint == "throughput" or args.perf_hint == "tput":
+                         perf_hint = properties.hint.PerformanceMode.THROUGHPUT
+                     elif args.perf_hint == "latency":
+                         perf_hint = properties.hint.PerformanceMode.LATENCY
+                     elif args.perf_hint == "cumulative_throughput" or args.perf_hint == "ctput":
+                         perf_hint = properties.hint.PerformanceMode.CUMULATIVE_THROUGHPUT
+                     elif args.perf_hint == 'none':
+                         # Do not set PerformanceMode; the plugin will apply its internal default PerformanceMode
+                         return
+                     else:
+                         raise RuntimeError("Incorrect performance hint. Please set the -hint option to "
+                                            "'throughput' (tput), 'latency', 'cumulative_throughput' (ctput) or 'none'.")
+                 else:
+                     perf_hint = properties.hint.PerformanceMode.LATENCY if benchmark.api_type == "sync" else properties.hint.PerformanceMode.THROUGHPUT
+                     logger.warning(f"Performance hint was not explicitly specified in command line. " +
+                                    f"Device({device}) performance hint will be set to {perf_hint}.")
+                 config[device][properties.hint.performance_mode()] = perf_hint
+             else:
+                 logger.warning(f"Device {device} does not support the performance hint property (-hint).")
+
+
+         def get_device_type_from_name(name):
+             new_name = str(name)
+             new_name = new_name.split(".", 1)[0]
+             new_name = new_name.split("(", 1)[0]
+             return new_name
+
+         ## Set default values from dumped config
+         default_devices = set()
+         for device in devices:
+             device_type = get_device_type_from_name(device)
+             if device_type in config and device not in config:
+                 config[device] = config[device_type].copy()
+                 default_devices.add(device_type)
+
+         for def_device in default_devices:
+             config.pop(def_device)
+
+         perf_counts = False
+         # check if using the virtual device
+         hw_devices_list = devices.copy()
+         # Remove the hardware devices if AUTO/MULTI/HETERO appears in the devices list.
+         is_virtual = is_virtual_device_found(devices)
+         if is_virtual:
+             devices.clear()
+             # Parse out the correct virtual device as the target device.
+             virtual_device = device_name.partition(":")[0]
+             hw_devices_list.remove(virtual_device)
+             devices.append(virtual_device)
+             parse_value_for_virtual_device(virtual_device, device_number_streams)
+             parse_value_for_virtual_device(virtual_device, device_infer_precision)
+
+         for device in devices:
+             supported_properties = benchmark.core.get_property(device, properties.supported_properties())
+             if device not in config.keys():
+                 config[device] = {}
+
+             ## high-level performance modes
+             set_performance_hint(device)
+
+             if is_flag_set_in_command_line('nireq'):
+                 config[device][properties.hint.num_requests()] = str(args.number_infer_requests)
+
+             ## Set performance counter
+             if is_flag_set_in_command_line('pc'):
+                 ## set to user defined value
+                 config[device][properties.enable_profiling()] = True if args.perf_counts else False
+             elif properties.enable_profiling() in config[device].keys() and config[device][properties.enable_profiling()] == True:
+                 logger.warning(f"Performance counters for {device} device are turned on. " +
+                                "To print results use the -pc option.")
+             elif args.report_type in [averageCntReport, detailedCntReport]:
+                 logger.warning(f"Turn on performance counters for {device} device " +
+                                f"since report type is {args.report_type}.")
+                 config[device][properties.enable_profiling()] = True
+             elif args.exec_graph_path is not None:
+                 logger.warning(f"Turn on performance counters for {device} device " +
+                                "due to execution graph dumping.")
+                 config[device][properties.enable_profiling()] = True
+             elif is_flag_set_in_command_line('pcsort'):
+                 ## set to default value
+                 logger.warning(f"Turn on performance counters for {device} device " +
+                                f"since pcsort value is {args.perf_counts_sort}.")
+                 config[device][properties.enable_profiling()] = True if args.perf_counts_sort else False
+             else:
+                 ## set to default value
+                 config[device][properties.enable_profiling()] = args.perf_counts
+             perf_counts = True if config[device][properties.enable_profiling()] == True else perf_counts
+
+             ## insert or append property into hw device properties list
+             def update_configs(hw_device, property_name, property_value):
+                 (key, value) = properties.device.properties({hw_device: {property_name: property_value}})
+                 # add property into hw device properties list.
+                 if key not in config[device].keys():
+                     config[device][key] = value
+                 else:
+                     current_config = config[device][key].get()
+                     if hw_device not in current_config.keys():
+                         current_config.update(value.get())
+                     else:
+                         current_device_config = current_config[hw_device]
+                         for prop in value.get().items():
+                             current_device_config.update(prop[1])
+                         current_config[hw_device].update(current_device_config)
+                     config[device][key].set(current_config)
+
+             def update_device_config_for_virtual_device(value, config, key):
+                 # check if the element contains the hardware device property
+                 if len(value.split(':')) == 1:
+                     config[device][key] = device_infer_precision[device]
+                 else:
+                     # set device nstreams properties in the AUTO/MULTI plugin
+                     value_vec = value[value.find('{') + 1:value.rfind('}')].split(',')
+                     device_properties = {value_vec[i].split(':')[0]: value_vec[i].split(':')[1] for i in range(0, len(value_vec))}
+                     for hw_device in device_properties.keys():
+                         update_configs(hw_device, key, device_properties[hw_device])
+
+             ## infer precision
+             def set_infer_precision():
+                 key = properties.hint.inference_precision()
+                 if device in device_infer_precision.keys():
+                     ## set to user defined value
+                     if key in supported_properties:
+                         config[device][key] = device_infer_precision[device]
+                     elif is_virtual_device(device):
+                         update_device_config_for_virtual_device(device_infer_precision[device], config, key)
+                     else:
+                         raise Exception(f"Device {device} doesn't support config key INFERENCE_PRECISION_HINT!" \
+                                         " Please specify -infer_precision for correct devices in format" \
+                                         " <dev1>:<infer_precision1>,<dev2>:<infer_precision2> or via configuration file.")
+                 return
+
+             ## the rest are individual per-device settings (overriding the values the device will deduce from perf hint)
+             def set_throughput_streams():
+                 key = get_device_type_from_name(device) + "_THROUGHPUT_STREAMS"
+                 if device in device_number_streams.keys():
+                     ## set to user defined value
+                     if key in supported_properties:
+                         config[device][key] = device_number_streams[device]
+                     elif properties.streams.num() in supported_properties:
+                         key = properties.streams.num()
+                         config[device][key] = device_number_streams[device]
+                     elif is_virtual_device(device):
+                         key = properties.streams.num()
+                         update_device_config_for_virtual_device(device_number_streams[device], config, key)
+                     else:
+                         raise Exception(f"Device {device} doesn't support config key '{key}'! " +
+                                         "Please specify -nstreams for correct devices in format <dev1>:<nstreams1>,<dev2>:<nstreams2>")
+                 elif key not in config[device].keys() and args.api_type == "async" \
+                         and 'PERFORMANCE_HINT' in config[device].keys() and config[device]['PERFORMANCE_HINT'] == '':
+                     ## set the _AUTO value for the #streams
+                     logger.warning(f"-nstreams default value is determined automatically for {device} device. " +
+                                    "Although the automatic selection usually provides reasonable performance, "
+                                    "it may still be non-optimal for some cases; for more information look at the README.")
+                     if key in supported_properties:
+                         config[device][key] = get_device_type_from_name(device) + "_THROUGHPUT_AUTO"
+                     elif properties.streams.num() in supported_properties:
+                         key = properties.streams.num()
+                         config[device][key] = "-1"  # Set AUTO mode for streams number
+                     elif is_virtual_device(device):
+                         # Set nstreams to default value auto if no nstreams specified from cmd line.
+                         for hw_device in hw_devices_list:
+                             hw_supported_properties = benchmark.core.get_property(hw_device, properties.supported_properties())
+                             key = get_device_type_from_name(hw_device) + "_THROUGHPUT_STREAMS"
+                             value = get_device_type_from_name(hw_device) + "_THROUGHPUT_AUTO"
+                             if key not in hw_supported_properties:
+                                 key = properties.streams.num()
+                                 value = properties.streams.Num.AUTO
+                             if key in hw_supported_properties:
+                                 update_configs(hw_device, key, value)
+                 if key in config[device].keys():
+                     device_number_streams[device] = config[device][key]
+                 return
+
+             def set_nthreads_pin(property_name, property_value):
+                 if property_name == properties.affinity():
+                     if property_value == "YES":
+                         property_value = properties.Affinity.CORE
+                     elif property_value == "NO":
+                         property_value = properties.Affinity.NONE
+                 if property_name in supported_properties or device_name == AUTO_DEVICE_NAME:
+                     # create nthreads/pin primary property for HW device or AUTO if -d is AUTO directly.
+                     config[device][property_name] = property_value
+                 elif is_virtual:
+                     # Create secondary property of -nthreads/-pin only for CPU if CPU device appears in the devices
+                     # list specified by -d.
+                     if CPU_DEVICE_NAME in hw_devices_list:
+                         update_configs(CPU_DEVICE_NAME, property_name, property_value)
+                 return
+
+             if args.number_threads and is_flag_set_in_command_line("nthreads"):
+                 # limit threading for CPU portion of inference
+                 set_nthreads_pin(properties.inference_num_threads(), str(args.number_threads))
+
+             if is_flag_set_in_command_line('pin'):
+                 ## set for CPU to user defined value
+                 set_nthreads_pin(properties.affinity(), args.infer_threads_pinning)
+
+             set_throughput_streams()
+             set_infer_precision()
+
+             if is_virtual_device(device):
+                 if device in device_number_streams.keys():
+                     del device_number_streams[device]
+
+         device_config = {}
+         for device in config:
+             if benchmark.device.find(device) == 0:
+                 device_config = config[device]
+         if args.cache_dir:
+             benchmark.set_cache_dir(args.cache_dir)
+
+         ## If batch size is set, disable auto batching
+         if args.batch_size:
+             logger.warning("Batch size is set. Auto batching will be disabled")
+             device_config["ALLOW_AUTO_BATCHING"] = False
+
+         topology_name = ""
+         load_from_file_enabled = is_flag_set_in_command_line('load_from_file') or is_flag_set_in_command_line('lfile')
+         if load_from_file_enabled and not is_network_compiled:
+             if args.mean_values or args.scale_values:
+                 raise RuntimeError("--mean_values and --scale_values aren't supported with --load_from_file. "
+                                    "The values can be set via model_optimizer while generating xml")
+             next_step()
+             print("Skipping the step for loading model from file")
+             next_step()
+             print("Skipping the step for loading model from file")
+             next_step()
+             print("Skipping the step for loading model from file")
+
+             # --------------------- 7. Loading the model to the device -------------------------------------------------
+             next_step()
+
+             start_time = datetime.utcnow()
+             compiled_model = benchmark.core.compile_model(args.path_to_model, benchmark.device, device_config)
+             duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
+             logger.info(f"Compile model took {duration_ms} ms")
+             if statistics:
+                 statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                           [
+                                               ('compile model time (ms)', duration_ms)
+                                           ])
+             app_inputs_info, _ = get_inputs_info(args.shape, args.data_shape, args.layout, args.batch_size, args.scale_values, args.mean_values, compiled_model.inputs)
+             batch_size = get_network_batch_size(app_inputs_info)
+         elif not is_network_compiled:
+             # --------------------- 4. Read the Intermediate Representation of the network -----------------------------
+             next_step()
+
+             logger.info("Loading model files")
+
+             start_time = datetime.utcnow()
+             model = benchmark.read_model(args.path_to_model)
+             topology_name = model.get_name()
+             duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
+             logger.info(f"Read model took {duration_ms} ms")
+             logger.info("Original model I/O parameters:")
+             print_inputs_and_outputs_info(model)
+
+             if statistics:
+                 statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                           [
+                                               ('read model time (ms)', duration_ms)
+                                           ])
+
+             # --------------------- 5. Resizing network to match image sizes and given batch ---------------------------
+             next_step()
+
+             app_inputs_info, reshape = get_inputs_info(args.shape, args.data_shape, args.layout, args.batch_size, args.scale_values, args.mean_values, model.inputs)
+
+             # use batch size according to provided layout and shapes
+             batch_size = get_network_batch_size(app_inputs_info)
+             logger.info(f'Model batch size: {batch_size}')
+
+             if reshape:
+                 start_time = datetime.utcnow()
+                 shapes = {info.name: info.partial_shape for info in app_inputs_info}
+                 logger.info(
+                     'Reshaping model: {}'.format(', '.join("'{}': {}".format(k, str(v)) for k, v in shapes.items())))
+                 model.reshape(shapes)
+                 duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
+                 logger.info(f"Reshape model took {duration_ms} ms")
+                 if statistics:
+                     statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                               [
+                                                   ('reshape model time (ms)', duration_ms)
+                                               ])
+
+             # --------------------- 6. Configuring inputs and outputs of the model --------------------------------------------------
+             next_step()
+
+             pre_post_processing(model, app_inputs_info, args.input_precision, args.output_precision, args.input_output_precision)
+             print_inputs_and_outputs_info(model)
+
+             # --------------------- 7. Loading the model to the device -------------------------------------------------
+             next_step()
+             start_time = datetime.utcnow()
+             compiled_model = benchmark.core.compile_model(model, benchmark.device, device_config)
+
+             duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
+             logger.info(f"Compile model took {duration_ms} ms")
+             if statistics:
+                 statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                           [
+                                               ('compile model time (ms)', duration_ms)
+                                           ])
+         else:
+             if args.mean_values or args.scale_values:
+                 raise RuntimeError("--mean_values and --scale_values aren't supported for a compiled model. "
+                                    "The values can be set via model_optimizer while generating xml")
+             next_step()
+             print("Skipping the step for compiled model")
+             next_step()
+             print("Skipping the step for compiled model")
+             next_step()
+             print("Skipping the step for compiled model")
+
+             # --------------------- 7. Loading the model to the device -------------------------------------------------
+             next_step()
+
+             start_time = datetime.utcnow()
+             compiled_model = benchmark.core.import_model(args.path_to_model, benchmark.device, device_config)
+             duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
+             logger.info(f"Import model took {duration_ms} ms")
+             if statistics:
+                 statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                           [
+                                               ('import model time (ms)', duration_ms)
+                                           ])
+             app_inputs_info, _ = get_inputs_info(args.shape, args.data_shape, args.layout, args.batch_size, args.scale_values, args.mean_values, compiled_model.inputs)
+             batch_size = get_network_batch_size(app_inputs_info)
+
+         # --------------------- 8. Querying optimal runtime parameters --------------------------------------------------
+         next_step()
+
+         ## actual device-deduced settings
+         keys = compiled_model.get_property(properties.supported_properties())
+         logger.info("Model:")
+         for k in keys:
+             skip_keys = (properties.supported_properties())
+             if k not in skip_keys:
+                 value = compiled_model.get_property(k)
+                 if k == properties.device.properties():
+                     for device_key in value.keys():
+                         logger.info(f'  {device_key}:')
+                         for k2, value2 in value.get(device_key).items():
+                             if k2 not in skip_keys:
+                                 logger.info(f'    {k2}: {value2}')
+                 else:
+                     logger.info(f'  {k}: {value}')
+
+         # Update number of streams
+         for device in device_number_streams.keys():
+             try:
+                 key = get_device_type_from_name(device) + '_THROUGHPUT_STREAMS'
+                 device_number_streams[device] = compiled_model.get_property(key)
+             except:
+                 key = 'NUM_STREAMS'
+                 device_number_streams[device] = compiled_model.get_property(key)
+
+         # ------------------------------------ 9. Creating infer requests and preparing input data ----------------------
+         next_step()
+
+         # Create infer requests
+         requests = benchmark.create_infer_requests(compiled_model)
+
+         # Prepare input data
+         paths_to_input = list()
+         if args.paths_to_input:
+             for path in args.paths_to_input:
+                 if ":" in next(iter(path), ""):
+                     paths_to_input.extend(path)
+                 else:
+                     paths_to_input.append(os.path.abspath(*path))
+
+         data_queue = get_input_data(paths_to_input, app_inputs_info)
+
+         static_mode = check_for_static(app_inputs_info)
+         allow_inference_only_or_sync = can_measure_as_static(app_inputs_info)
+         if not allow_inference_only_or_sync and benchmark.api_type == 'sync':
+             raise Exception("Benchmarking of the model with dynamic shapes is available for async API only. "
+                             "Please use -api async -hint latency -nireq 1 to emulate sync behavior.")
+
+         if benchmark.inference_only is None:
+             if static_mode:
+                 benchmark.inference_only = True
+             else:
+                 benchmark.inference_only = False
+         elif benchmark.inference_only and not allow_inference_only_or_sync:
+             raise Exception("Benchmarking a dynamic model is available with input filling in the measurement loop only!")
+
+         # update batch size in case of a dynamic network with one data_shape
+         if allow_inference_only_or_sync and batch_size.is_dynamic:
+             batch_size = Dimension(data_queue.batch_sizes[data_queue.current_group_id])
+
+         benchmark.latency_groups = get_latency_groups(app_inputs_info)
+
+         if len(benchmark.latency_groups) > 1:
+             logger.info(f"Defined {len(benchmark.latency_groups)} tensor groups:")
+             for group in benchmark.latency_groups:
+                 logger.info(f"\t{str(group)}")
+
+         # Iteration limit
+         benchmark.niter = get_number_iterations(benchmark.niter, benchmark.nireq, max(len(info.shapes) for info in app_inputs_info), benchmark.api_type)
+
+         # Set input tensors before first inference
+         for request in requests:
+             data_tensors = data_queue.get_next_input()
+             for port, data_tensor in data_tensors.items():
+                 input_tensor = request.get_input_tensor(port)
+                 if not static_mode:
+                     input_tensor.shape = data_tensor.shape
+                 if not len(input_tensor.shape):
+                     input_tensor.data.flat[:] = data_tensor.data
+                 else:
+                     input_tensor.data[:] = data_tensor.data
+
+         if statistics:
+             statistics.add_parameters(StatisticsReport.Category.RUNTIME_CONFIG,
+                                       [
+                                           ('topology', topology_name),
+                                           ('target device', device_name),
+                                           ('API', args.api_type),
+                                           ('inference_only', benchmark.inference_only),
+                                           ('precision', "UNSPECIFIED"),
+                                           ('batch size', str(batch_size)),
+                                           ('number of iterations', str(benchmark.niter)),
+                                           ('number of parallel infer requests', str(benchmark.nireq)),
+                                           ('duration (ms)', str(get_duration_in_milliseconds(benchmark.duration_seconds))),
+                                       ])
+
+             for nstreams in device_number_streams.items():
+                 statistics.add_parameters(StatisticsReport.Category.RUNTIME_CONFIG,
+                                           [
+                                               (f"number of {nstreams[0]} streams", str(nstreams[1])),
+                                           ])
+
+         # ------------------------------------ 10. Measuring performance -----------------------------------------------
+
+         output_string = process_help_inference_string(benchmark, device_number_streams)
+
+         next_step(additional_info=output_string)
+
+         if benchmark.inference_only:
+             logger.info("Benchmarking in inference only mode (inputs filling is not included in the measurement loop).")
+         else:
+             logger.info("Benchmarking in full mode (inputs filling is included in the measurement loop).")
+         duration_ms = f"{benchmark.first_infer(requests):.2f}"
+         logger.info(f"First inference took {duration_ms} ms")
+         if statistics:
+             statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                       [
+                                           ('first inference time (ms)', duration_ms)
+                                       ])
+
+         pcseq = args.pcseq
+         if static_mode or len(benchmark.latency_groups) == 1:
+             pcseq = False
+
+         fps, median_latency_ms, avg_latency_ms, min_latency_ms, max_latency_ms, total_duration_sec, iteration = benchmark.main_loop(requests, data_queue, batch_size, args.latency_percentile, pcseq)
+
+         # ------------------------------------ 11. Dumping statistics report -------------------------------------------
+         next_step()
+
+         if args.dump_config:
+             dump_config(args.dump_config, config)
+             logger.info(f"OpenVINO configuration settings were dumped to {args.dump_config}")
+
+         if args.exec_graph_path:
+             dump_exec_graph(compiled_model, args.exec_graph_path)
+
+         if perf_counts:
+             perfs_count_list = []
+             for request in requests:
+                 perfs_count_list.append(request.profiling_info)
+
+             if args.perf_counts_sort:
+                 total_sorted_list = print_perf_counters_sort(perfs_count_list, sort_flag=args.perf_counts_sort)
+                 if statistics:
+                     statistics.dump_performance_counters_sorted(total_sorted_list)
+
+             elif args.perf_counts:
+                 print_perf_counters(perfs_count_list)
+
+             if statistics:
+                 # if not args.perf_counts_sort:
+                 statistics.dump_performance_counters(perfs_count_list)
+
+         if statistics:
+             statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                       [
+                                           ('total execution time (ms)', f'{get_duration_in_milliseconds(total_duration_sec):.2f}'),
+                                           ('total number of iterations', str(iteration)),
+                                       ])
+             if MULTI_DEVICE_NAME not in device_name:
+                 latency_prefix = None
+                 if args.latency_percentile == 50:
+                     latency_prefix = 'latency (ms)'
+                 else:
+                     latency_prefix = 'latency (' + str(args.latency_percentile) + ' percentile) (ms)'
+                 if latency_prefix:
+                     statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                               [
+                                                   (latency_prefix, f'{median_latency_ms:.2f}'),
+                                               ])
+                 statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                           [
+                                               ("avg latency", f'{avg_latency_ms:.2f}'),
+                                           ])
+                 statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                           [
+                                               ("min latency", f'{min_latency_ms:.2f}'),
+                                           ])
+                 statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                           [
+                                               ("max latency", f'{max_latency_ms:.2f}'),
+                                           ])
+                 if pcseq:
+                     for group in benchmark.latency_groups:
+                         statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                                   [
+                                                       ("group", str(group)),
+                                                   ])
+                         statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                                   [
+                                                       ("avg latency", f'{group.avg:.2f}'),
+                                                   ])
+                         statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                                   [
+                                                       ("min latency", f'{group.min:.2f}'),
+                                                   ])
+                         statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                                   [
+                                                       ("max latency", f'{group.max:.2f}'),
+                                                   ])
+             statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+                                       [
+                                           ('throughput', f'{fps:.2f}'),
+                                       ])
+             statistics.dump()
+
+         try:
+             exeDevice = compiled_model.get_property("EXECUTION_DEVICES")
+             logger.info(f'Execution Devices: {exeDevice}')
+         except:
+             pass
+         logger.info(f'Count: {iteration} iterations')
+         logger.info(f'Duration: {get_duration_in_milliseconds(total_duration_sec):.2f} ms')
+         if MULTI_DEVICE_NAME not in device_name:
+             logger.info('Latency:')
+             if args.latency_percentile == 50:
+                 logger.info(f'    Median:  {median_latency_ms:.2f} ms')
+             else:
+                 logger.info(f'    {args.latency_percentile} percentile: {median_latency_ms:.2f} ms')
+             logger.info(f'    Average: {avg_latency_ms:.2f} ms')
+             logger.info(f'    Min:     {min_latency_ms:.2f} ms')
+             logger.info(f'    Max:     {max_latency_ms:.2f} ms')
+
+         if pcseq:
+             logger.info("Latency for each data shape group:")
+             for idx, group in enumerate(benchmark.latency_groups):
+                 logger.info(f"{idx+1}. {str(group)}")
+                 if args.latency_percentile == 50:
+                     logger.info(f'    Median:  {group.median:.2f} ms')
+                 else:
+                     logger.info(f'    {args.latency_percentile} percentile: {group.median:.2f} ms')
+                 logger.info(f'    Average: {group.avg:.2f} ms')
+                 logger.info(f'    Min:     {group.min:.2f} ms')
+                 logger.info(f'    Max:     {group.max:.2f} ms')
+
+         logger.info(f'Throughput: {fps:.2f} FPS')
+
+         del compiled_model
+
+         next_step.step_id = 0
+     except Exception as e:
+         logger.exception(e)
+
+         if statistics:
+             statistics.add_parameters(
+                 StatisticsReport.Category.EXECUTION_RESULTS,
+                 [('error', str(e))]
+             )
+             statistics.dump()
+         sys.exit(1)