bigdl-core-npu 2.5.0__cp311-cp311-win_amd64.whl → 2.6.0__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. bigdl-core-npu/__init__.py +0 -0
  2. bigdl-core-npu/common.lib +0 -0
  3. bigdl-core-npu/ggml.dll +0 -0
  4. bigdl-core-npu/ggml.lib +0 -0
  5. bigdl-core-npu/include/llamacpp/arg.h +77 -0
  6. bigdl-core-npu/include/llamacpp/common.h +563 -0
  7. bigdl-core-npu/include/llamacpp/ggml-alloc.h +76 -0
  8. bigdl-core-npu/include/llamacpp/ggml-backend.h +241 -0
  9. bigdl-core-npu/include/llamacpp/ggml.h +2679 -0
  10. bigdl-core-npu/include/llamacpp/llama.h +1234 -0
  11. bigdl-core-npu/include/llamacpp/log.h +92 -0
  12. bigdl-core-npu/include/npu/npu_common.h +119 -0
  13. bigdl-core-npu/include/npu/npu_llm.h +77 -0
  14. bigdl-core-npu/llama-cli-npu.exe +0 -0
  15. bigdl-core-npu/llama.dll +0 -0
  16. bigdl-core-npu/llama.lib +0 -0
  17. bigdl-core-npu/llm-cli.exe +0 -0
  18. bigdl-core-npu/npu_llm.dll +0 -0
  19. bigdl-core-npu/npu_llm.lib +0 -0
  20. bigdl-core-npu/zlib1.dll +0 -0
  21. bigdl_core_npu-2.6.0.data/scripts/init-llama-cpp.bat +29 -0
  22. {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/METADATA +12 -3
  23. {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/RECORD +146 -96
  24. {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/WHEEL +1 -1
  25. {bigdl_core_npu-2.5.0.dist-info → bigdl_core_npu-2.6.0.dist-info}/top_level.txt +1 -0
  26. intel_npu_acceleration_library/_version.py +1 -1
  27. intel_npu_acceleration_library/backend/base.py +39 -4
  28. intel_npu_acceleration_library/backend/bindings.py +109 -5
  29. intel_npu_acceleration_library/backend/factory.py +264 -47
  30. intel_npu_acceleration_library/backend/ops.py +2 -1
  31. intel_npu_acceleration_library/backend/qlinear.py +8 -4
  32. intel_npu_acceleration_library/backend/runtime.py +7 -2
  33. intel_npu_acceleration_library/backend/tensor.py +73 -3
  34. intel_npu_acceleration_library/bigdl-core-npu/cache.json +113732 -0
  35. intel_npu_acceleration_library/bigdl-core-npu/openvino.dll +0 -0
  36. intel_npu_acceleration_library/bigdl-core-npu/openvino_auto_batch_plugin.dll +0 -0
  37. intel_npu_acceleration_library/bigdl-core-npu/openvino_auto_plugin.dll +0 -0
  38. intel_npu_acceleration_library/bigdl-core-npu/openvino_c.dll +0 -0
  39. intel_npu_acceleration_library/bigdl-core-npu/openvino_hetero_plugin.dll +0 -0
  40. intel_npu_acceleration_library/bigdl-core-npu/openvino_intel_cpu_plugin.dll +0 -0
  41. intel_npu_acceleration_library/bigdl-core-npu/openvino_intel_gpu_plugin.dll +0 -0
  42. intel_npu_acceleration_library/bigdl-core-npu/openvino_intel_npu_plugin.dll +0 -0
  43. intel_npu_acceleration_library/bigdl-core-npu/openvino_ir_frontend.dll +0 -0
  44. intel_npu_acceleration_library/bigdl-core-npu/openvino_onnx_frontend.dll +0 -0
  45. intel_npu_acceleration_library/bigdl-core-npu/openvino_paddle_frontend.dll +0 -0
  46. intel_npu_acceleration_library/bigdl-core-npu/openvino_pytorch_frontend.dll +0 -0
  47. intel_npu_acceleration_library/bigdl-core-npu/openvino_tensorflow_frontend.dll +0 -0
  48. intel_npu_acceleration_library/bigdl-core-npu/openvino_tensorflow_lite_frontend.dll +0 -0
  49. intel_npu_acceleration_library/bigdl-core-npu/tbb12.dll +0 -0
  50. intel_npu_acceleration_library/bigdl-core-npu/tbb12_debug.dll +0 -0
  51. intel_npu_acceleration_library/bigdl-core-npu/tbbbind_2_5.dll +0 -0
  52. intel_npu_acceleration_library/bigdl-core-npu/tbbbind_2_5_debug.dll +0 -0
  53. intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc.dll +0 -0
  54. intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc_debug.dll +0 -0
  55. intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc_proxy.dll +0 -0
  56. intel_npu_acceleration_library/bigdl-core-npu/tbbmalloc_proxy_debug.dll +0 -0
  57. intel_npu_acceleration_library/device.py +2 -2
  58. intel_npu_acceleration_library/dtypes.py +34 -1
  59. intel_npu_acceleration_library/external/openvino/__init__.py +1 -0
  60. intel_npu_acceleration_library/external/openvino/_offline_transformations/__init__.py +1 -0
  61. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp310-win_amd64.pyd +0 -0
  62. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp311-win_amd64.pyd +0 -0
  63. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp312-win_amd64.pyd +0 -0
  64. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp38-win_amd64.pyd +0 -0
  65. intel_npu_acceleration_library/external/openvino/_pyopenvino.cp39-win_amd64.pyd +0 -0
  66. intel_npu_acceleration_library/external/openvino/experimental/__init__.py +14 -0
  67. intel_npu_acceleration_library/external/openvino/frontend/jax/__init__.py +15 -0
  68. intel_npu_acceleration_library/external/openvino/frontend/jax/jaxpr_decoder.py +293 -0
  69. intel_npu_acceleration_library/external/openvino/frontend/jax/passes.py +65 -0
  70. intel_npu_acceleration_library/external/openvino/frontend/jax/utils.py +182 -0
  71. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp310-win_amd64.pyd +0 -0
  72. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp311-win_amd64.pyd +0 -0
  73. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp312-win_amd64.pyd +0 -0
  74. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp38-win_amd64.pyd +0 -0
  75. intel_npu_acceleration_library/external/openvino/frontend/onnx/py_onnx_frontend.cp39-win_amd64.pyd +0 -0
  76. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp310-win_amd64.pyd +0 -0
  77. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp311-win_amd64.pyd +0 -0
  78. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp312-win_amd64.pyd +0 -0
  79. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp38-win_amd64.pyd +0 -0
  80. intel_npu_acceleration_library/external/openvino/frontend/paddle/py_paddle_frontend.cp39-win_amd64.pyd +0 -0
  81. intel_npu_acceleration_library/external/openvino/frontend/pytorch/fx_decoder.py +37 -19
  82. intel_npu_acceleration_library/external/openvino/frontend/pytorch/gptq.py +47 -6
  83. intel_npu_acceleration_library/external/openvino/frontend/pytorch/patch_model.py +28 -8
  84. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp310-win_amd64.pyd +0 -0
  85. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp311-win_amd64.pyd +0 -0
  86. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp312-win_amd64.pyd +0 -0
  87. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp38-win_amd64.pyd +0 -0
  88. intel_npu_acceleration_library/external/openvino/frontend/pytorch/py_pytorch_frontend.cp39-win_amd64.pyd +0 -0
  89. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/backend.py +17 -5
  90. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/op_support.py +1 -0
  91. intel_npu_acceleration_library/external/openvino/frontend/pytorch/torchdynamo/partition.py +55 -47
  92. intel_npu_acceleration_library/external/openvino/frontend/pytorch/ts_decoder.py +95 -63
  93. intel_npu_acceleration_library/external/openvino/frontend/pytorch/utils.py +12 -10
  94. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp310-win_amd64.pyd +0 -0
  95. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp311-win_amd64.pyd +0 -0
  96. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp312-win_amd64.pyd +0 -0
  97. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp38-win_amd64.pyd +0 -0
  98. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/py_tensorflow_frontend.cp39-win_amd64.pyd +0 -0
  99. intel_npu_acceleration_library/external/openvino/frontend/tensorflow/utils.py +31 -10
  100. intel_npu_acceleration_library/external/openvino/helpers/packing.py +4 -4
  101. intel_npu_acceleration_library/external/openvino/preprocess/__init__.py +2 -0
  102. intel_npu_acceleration_library/external/openvino/preprocess/torchvision/requirements.txt +1 -0
  103. intel_npu_acceleration_library/external/openvino/properties/__init__.py +1 -0
  104. intel_npu_acceleration_library/external/openvino/runtime/ie_api.py +1 -1
  105. intel_npu_acceleration_library/external/openvino/runtime/op/__init__.py +1 -0
  106. intel_npu_acceleration_library/external/openvino/runtime/opset1/ops.py +2 -1
  107. intel_npu_acceleration_library/external/openvino/runtime/opset13/ops.py +5 -6
  108. intel_npu_acceleration_library/external/openvino/runtime/opset15/__init__.py +7 -0
  109. intel_npu_acceleration_library/external/openvino/runtime/opset15/ops.py +193 -2
  110. intel_npu_acceleration_library/external/openvino/runtime/opset6/ops.py +69 -43
  111. intel_npu_acceleration_library/external/openvino/runtime/opset8/ops.py +4 -0
  112. intel_npu_acceleration_library/external/openvino/runtime/properties/__init__.py +2 -0
  113. intel_npu_acceleration_library/external/openvino/runtime/utils/data_helpers/data_dispatcher.py +21 -3
  114. intel_npu_acceleration_library/external/openvino/runtime/utils/decorators.py +88 -2
  115. intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py +9 -9
  116. intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py +16 -2
  117. intel_npu_acceleration_library/external/openvino/tools/ovc/main.py +5 -0
  118. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py +19 -0
  119. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py +68 -16
  120. intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +69 -60
  121. intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py +90 -3
  122. intel_npu_acceleration_library/external/openvino/utils.py +17 -0
  123. intel_npu_acceleration_library/lib/Release/intel_npu_acceleration_library.dll +0 -0
  124. intel_npu_acceleration_library/lib/Release/openvino.dll +0 -0
  125. intel_npu_acceleration_library/lib/Release/openvino_auto_batch_plugin.dll +0 -0
  126. intel_npu_acceleration_library/lib/Release/openvino_auto_plugin.dll +0 -0
  127. intel_npu_acceleration_library/lib/Release/openvino_c.dll +0 -0
  128. intel_npu_acceleration_library/lib/Release/openvino_hetero_plugin.dll +0 -0
  129. intel_npu_acceleration_library/lib/Release/openvino_intel_cpu_plugin.dll +0 -0
  130. intel_npu_acceleration_library/lib/Release/openvino_intel_gpu_plugin.dll +0 -0
  131. intel_npu_acceleration_library/lib/Release/openvino_intel_npu_plugin.dll +0 -0
  132. intel_npu_acceleration_library/lib/Release/openvino_ir_frontend.dll +0 -0
  133. intel_npu_acceleration_library/lib/Release/openvino_onnx_frontend.dll +0 -0
  134. intel_npu_acceleration_library/lib/Release/openvino_paddle_frontend.dll +0 -0
  135. intel_npu_acceleration_library/lib/Release/openvino_pytorch_frontend.dll +0 -0
  136. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_frontend.dll +0 -0
  137. intel_npu_acceleration_library/lib/Release/openvino_tensorflow_lite_frontend.dll +0 -0
  138. intel_npu_acceleration_library/lib/Release/tbb12.dll +0 -0
  139. intel_npu_acceleration_library/lib/Release/tbb12_debug.dll +0 -0
  140. intel_npu_acceleration_library/lib/Release/tbbbind_2_5.dll +0 -0
  141. intel_npu_acceleration_library/lib/Release/tbbbind_2_5_debug.dll +0 -0
  142. intel_npu_acceleration_library/lib/Release/tbbmalloc.dll +0 -0
  143. intel_npu_acceleration_library/lib/Release/tbbmalloc_debug.dll +0 -0
  144. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy.dll +0 -0
  145. intel_npu_acceleration_library/lib/Release/tbbmalloc_proxy_debug.dll +0 -0
  146. intel_npu_acceleration_library/nn/module.py +17 -17
intel_npu_acceleration_library/external/openvino/tools/benchmark/utils/inputs_filling.py

@@ -266,7 +266,7 @@ def get_numpy_tensors(numpy_paths: List[str], info: AppInputInfo, batch_sizes: L
  else:
  try:
  if info.layout.has_name("N"):
- numpy_arrays[[None] * info.layout.get_index_by_name("N") + [b]] = numpy_arr
+ numpy_arrays[[None] * info.layout.get_index_by_name("N") + [b]] = numpy_arr[b]
  else:
  numpy_arrays = numpy_arr
  except ValueError:

@@ -291,7 +291,7 @@ def get_binary_tensors(binary_paths: List[str], info: AppInputInfo, batch_sizes:
  tensors = []
  for i in range(niter):
  shape_id = i % num_shapes
- dtype = get_dtype(info.element_type)
+ dtype = np.uint8() if info.element_type.bitwidth < 8 else get_dtype(info.element_type)
  shape = list(info.shapes[shape_id])
  binaries = np.ndarray(shape=shape, dtype=dtype)
  binary_index = processed_frames

@@ -301,14 +301,14 @@ def get_binary_tensors(binary_paths: List[str], info: AppInputInfo, batch_sizes:
  binary_filename: str = binary_paths[binary_index]
  extension = binary_filename.lower().split('.')[-1]
  if extension == "bin":
- binary_file_size = os.path.getsize(binary_filename)
- blob_size = dtype.itemsize * int(np.prod(shape))
- if blob_size != binary_file_size:
+ binary_file_bit_size = os.path.getsize(binary_filename) * 8
+ blob_bit_size = info.element_type.bitwidth * int(np.prod(shape))
+ if blob_bit_size != binary_file_bit_size:
  raise Exception(
- f"File {binary_filename} contains {binary_file_size} bytes but model expects {blob_size}")
- from_file = np.reshape(np.fromfile(binary_filename, dtype), shape)
+ f"File {binary_filename} contains {binary_file_bit_size} bites but model expects {blob_bit_size}")
+ from_file = np.fromfile(binary_filename, dtype)
  if info.layout.has_name("N"):
- binaries[[None] * info.layout.get_index_by_name("N") + [b]] = from_file
+ binaries[[None] * info.layout.get_index_by_name("N") + [b]] = from_file[b]
  else:
  binaries = from_file
  else:

@@ -317,7 +317,7 @@ def get_binary_tensors(binary_paths: List[str], info: AppInputInfo, batch_sizes:

  binary_index += 1
  processed_frames += current_batch_size
- tensors.append(Tensor(binaries))
+ tensors.append(Tensor(binaries, shape, info.element_type))
  return tensors
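
The switch above from a byte-based to a bit-based size check matters for sub-byte element types, where dtype.itemsize (a whole number of bytes) cannot represent the element size exactly. A minimal standalone sketch of the same arithmetic, assuming a hypothetical 4-bit (u4) input of shape [1, 3, 224, 224]; the shape and bitwidth are illustrative, not taken from this diff:

    import numpy as np

    # Hypothetical u4 input: 4 bits per element.
    bitwidth = 4                     # info.element_type.bitwidth for u4
    shape = [1, 3, 224, 224]

    blob_bit_size = bitwidth * int(np.prod(shape))   # 602112 bits
    expected_file_bytes = blob_bit_size // 8         # 75264 bytes on disk

    # The updated check compares os.path.getsize(binary_filename) * 8 with
    # blob_bit_size, so a matching .bin file must be exactly 75264 bytes long,
    # while a byte-based itemsize check could not express half-byte elements.
    print(blob_bit_size, expected_file_bytes)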
intel_npu_acceleration_library/external/openvino/tools/ovc/convert_impl.py

@@ -36,6 +36,10 @@ from openvino.tools.ovc.telemetry_utils import send_params_info, send_conversion
  init_mo_telemetry
  from openvino.tools.ovc.moc_frontend.pytorch_frontend_utils import get_pytorch_decoder, extract_input_info_from_example
  from openvino.tools.ovc.moc_frontend.paddle_frontend_utils import paddle_frontend_converter
+ try:
+ from openvino.tools.ovc.moc_frontend.jax_frontend_utils import get_jax_decoder
+ except:
+ get_jax_decoder = None

  # pylint: disable=no-name-in-module,import-error
  from openvino.frontend import FrontEndManager, OpConversionFailure, TelemetryExtension

@@ -228,6 +232,11 @@ def check_model_object(argv):
  paddle.fluid.dygraph.layers.Layer) or isinstance(
  model, paddle.fluid.executor.Executor):
  return "paddle"
+
+ if 'jax' in sys.modules:
+ import jax
+ if isinstance(model, (jax.core.Jaxpr, jax.core.ClosedJaxpr)):
+ return "jax"

  raise Error('Unknown model type: {}'.format(type(model)))

@@ -319,6 +328,7 @@ def normalize_inputs(argv: argparse.Namespace):
  """
  # Parse input to list of InputCutInfo
  inputs = input_to_input_cut_info(argv.input)
+ argv.input = inputs

  # Make list of input names
  input_names_list = []

@@ -329,8 +339,6 @@ def normalize_inputs(argv: argparse.Namespace):
  assert len(input_names_list) == len(inputs), "\"input\" parameter has unnamed inputs and named inputs. " \
  "Please either set names for all inputs, " \
  "or do not set names for all inputs."
- argv.inputs_list = input_names_list
- argv.input = ','.join(input_names_list)

  if len(input_names_list) > 0:
  # Named inputs case

@@ -462,6 +470,12 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used):
  outputs)
  pdmodel = paddle_runtime_converter.convert_paddle_to_pdmodel()
  args['input_model'] = pdmodel
+ if model_framework == "jax":
+ if get_jax_decoder is not None:
+ get_jax_decoder(args['input_model'], args)
+ else:
+ raise Error("JAX Frontend is not available.")
+

  argv = pack_params_to_args_namespace(args, cli_parser, python_api_used)
  argv.framework = model_framework
intel_npu_acceleration_library/external/openvino/tools/ovc/main.py

@@ -10,6 +10,11 @@ except ImportError:
  import openvino.tools.ovc.telemetry_stub as tm
  from openvino.tools.ovc.convert_impl import _convert
  from openvino.tools.ovc.cli_parser import get_model_name_from_args
+ from openvino.tools.ovc.utils import import_openvino_tokenizers
+
+ # TODO 131000: temporal workaround to patch OpenVINO Core and frontends with tokenizers extensions
+ # make OVC tool to convert models requiring openvino-tokenizers extensions
+ import_openvino_tokenizers()

  # pylint: disable=no-name-in-module,import-error
  from openvino.runtime import save_model
intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/jax_frontend_utils.py (new file)

@@ -0,0 +1,19 @@
+ # Copyright (C) 2018-2024 Intel Corporation
+ # SPDX-License-Identifier: Apache-2.0
+
+ import logging as log
+
+
+ def get_jax_decoder(model, args):
+ try:
+ from openvino.frontend.jax.jaxpr_decoder import JaxprPythonDecoder
+ except Exception as e:
+ log.error("JAX frontend loading failed")
+ raise e
+
+ if not isinstance(model, JaxprPythonDecoder):
+ decoder = JaxprPythonDecoder(model)
+ else:
+ decoder = model
+
+ args['input_model'] = decoder
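
Together with the convert_impl.py hunks above, this new file lets OVC accept a Jaxpr as an input model. A hedged end-to-end sketch; the toy function f and the exact convert_model call pattern are illustrative assumptions, not taken from this diff:

    import jax
    import jax.numpy as jnp
    from openvino import convert_model

    def f(x):  # hypothetical toy model
        return jnp.tanh(x) * 2.0

    # jax.make_jaxpr returns a jax.core.ClosedJaxpr; per the check_model_object()
    # hunk above, it should be classified as a "jax" model and wrapped in
    # JaxprPythonDecoder via get_jax_decoder().
    closed_jaxpr = jax.make_jaxpr(f)(jnp.ones((2, 3), dtype=jnp.float32))
    ov_model = convert_model(closed_jaxpr)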
intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pipeline.py

@@ -72,12 +72,14 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
  outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name())
  input_model.override_all_outputs([x['node'] for x in outputs])
  '''
- argv.placeholder_shapes, argv.placeholder_data_types = convert_params_lists_to_dicts(
- input_model, argv.placeholder_shapes, argv.placeholder_data_types)

- user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
- input_model, argv.placeholder_shapes, argv.placeholder_data_types,
- argv.output, {}, moc_front_end.get_name())
+ enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms()
+ if 'ANALYSIS_JSON_PRINT' in enabled_transforms:
+ # NOTE that model analysis is performed before applying user's settings (inputs's shapes etc.)
+ framework_model = moc_front_end.decode(input_model)
+ json_model_analysis_dump(framework_model)
+ # a model is not processed further in json analysis mode
+ sys.exit(0)

  def check_places_are_same(places_original: List[Place], places_new: List[Place]):
  """

@@ -90,6 +92,67 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
  [item for item in places_original if any(
  [item.is_equal(item2['node']) for item2 in places_new])]) == len(places_original)

+ if getattr(argv, "framework", None) == "pytorch":
+ iplaces = []
+ for idx, input_info in enumerate(argv.input):
+ if getattr(input_info, "name", None):
+ place = input_model.get_place_by_tensor_name(input_info.name)
+ if not input_info.shape and not input_info.type:
+ # If we received place by name, we need to use it for FE to verify
+ # that such name exist, otherwise we silently ignore it.
+ # Using dynamic shape should be safe, because FE will not overwrite
+ # the shape that was produced after conversion, but merge it, so
+ # dynamic shape will not change anything.
+ input_model.set_partial_shape(place, PartialShape.dynamic())
+ else:
+ place = input_model.get_place_by_input_index(idx)
+ iplaces.append(place)
+ if input_info.shape is not None:
+ input_model.set_partial_shape(place, input_info.shape)
+ if input_info.type is not None:
+ input_model.set_element_type(place, input_info.type)
+ model_inputs = input_model.get_inputs()
+ def merge_inputs(inputs, to_set_list):
+ # use input places instead of obtained by index if they are the same
+ res = []
+ for p in to_set_list:
+ found = False
+ for i in inputs:
+ if p.is_equal(i):
+ res.append(i)
+ found = True
+ break
+ if not found:
+ res.append(p)
+ return res
+ iplaces = merge_inputs(model_inputs, iplaces)
+ # Currently this only work to reorder inputs/outputs
+ to_override_all_inputs = check_places_are_same(model_inputs, [{"node": p} for p in iplaces])
+ to_override_all_outputs = False
+ if argv.output:
+ oplaces = []
+ _outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name())
+ for out_desc in _outputs:
+ oplaces.append(out_desc["name"])
+ model_outputs = input_model.get_outputs()
+ to_override_all_outputs = check_places_are_same(model_outputs, [{"node": p} for p in oplaces])
+ if to_override_all_inputs and to_override_all_outputs:
+ input_model.extract_subgraph(iplaces, oplaces)
+ elif to_override_all_inputs:
+ input_model.override_all_inputs(iplaces)
+ elif to_override_all_outputs:
+ input_model.override_all_outputs(oplaces)
+
+ ov_model = moc_front_end.convert(input_model)
+ return ov_model
+
+ argv.placeholder_shapes, argv.placeholder_data_types = convert_params_lists_to_dicts(
+ input_model, argv.placeholder_shapes, argv.placeholder_data_types)
+
+ user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
+ input_model, argv.placeholder_shapes, argv.placeholder_data_types,
+ argv.output, {}, moc_front_end.get_name())
+
  def add_names_to_tensors(model: InputModel, places: List[Place]):
  """
  Adds additional names to some model input tensors. This helper should be used

@@ -107,14 +170,6 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
  log.warning('Could not add an additional name to a tensor pointed to by \'{}\'. Details: {}'.format(
  new_input['input_name'], str(e)))

- enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms()
- if 'ANALYSIS_JSON_PRINT' in enabled_transforms:
- # NOTE that model analysis is performed before applying user's settings (inputs's shapes etc.)
- framework_model = moc_front_end.decode(input_model)
- json_model_analysis_dump(framework_model)
- # a model is not processed further in json analysis mode
- sys.exit(0)
-
  model_inputs = input_model.get_inputs()
  inputs_equal = True
  if user_shapes:

@@ -238,9 +293,6 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):

  input_model.set_tensor_value(place, value)

- def shape_to_array(shape: PartialShape):
- return [shape.get_dimension(i) for i in range(shape.rank.get_length())]
-
  ov_model = moc_front_end.convert(input_model)

  return ov_model
intel_npu_acceleration_library/external/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py

@@ -8,6 +8,7 @@ import numpy as np
  # pylint: disable=no-name-in-module,import-error
  from openvino.runtime import Tensor, PartialShape
  from openvino.tools.ovc.error import Error
+ from openvino.tools.ovc.cli_parser import single_input_to_input_cut_info, _InputCutInfo




@@ -90,76 +91,84 @@ def get_value_from_list_or_dict(container, name, idx):
  return None


+ def flatten_inputs(inputs, names=None):
+ flattened = []
+ if isinstance(inputs, dict):
+ # if names are provided we need to unpack in the same order
+ if names:
+ for name in names:
+ if isinstance(inputs[name], (list, tuple, dict)):
+ flattened.extend(flatten_inputs(inputs[name]))
+ else:
+ flattened.append((name, inputs[name]))
+ else:
+ for name, input_data in inputs.items():
+ if isinstance(input_data, (list, tuple, dict)):
+ flattened.extend(flatten_inputs(input_data))
+ else:
+ flattened.append((name, input_data))
+ else:
+ for input_data in inputs:
+ if isinstance(input_data, (list, tuple, dict)):
+ flattened.extend(flatten_inputs(input_data))
+ else:
+ flattened.append(input_data)
+ return flattened
+
+
  def extract_input_info_from_example(args, inputs):
  try:
  from openvino.frontend.pytorch.utils import pt_to_ov_type_map # pylint: disable=no-name-in-module,import-error
  except Exception as e:
  log.error("PyTorch frontend loading failed")
  raise e
- example_inputs = args.example_input
- data_types = args.placeholder_data_types or {}
- input_shapes = args.placeholder_shapes or {}
+ example_inputs = args.input_model._example_input if args.input_model._example_input is not None else args.example_input
+ if example_inputs is None:
+ return
  is_dict_input = isinstance(example_inputs, dict)
- list_inputs = list(example_inputs.values()) if is_dict_input else example_inputs
- input_names = None
  if not isinstance(example_inputs, (list, tuple, dict)):
- list_inputs = [list_inputs]
- if args.input_model._input_is_list:
- list_inputs[0] = list_inputs[0].unsqueeze(0)
- if args.input_model._input_signature is not None and not is_dict_input:
+ example_inputs = [example_inputs]
+ input_names = None
+ if args.input_model._input_signature is not None:
  input_names = args.input_model._input_signature[1:] if args.input_model._input_signature[
  0] == "self" else args.input_model._input_signature
- if not is_dict_input:
- example_inputs = dict(zip(input_names, list_inputs))
- is_dict_input = True
- elif is_dict_input:
- input_names = list(example_inputs)
- if not data_types and input_names is None:
- data_types = []
- if not input_shapes and input_names is None:
- input_shapes = []
- if inputs:
- for input_id, input_info in enumerate(inputs):
- input_name = input_info.name
- if is_dict_input and input_name in example_inputs:
- example_input = example_inputs[input_name]
- else:
- example_input = list_inputs[input_id]
- if is_dict_input and input_name is None:
- input_name = input_names[input_id]
- dtype = getattr(example_input, "dtype", type(example_input))
- example_dtype = pt_to_ov_type_map.get(str(dtype))
- user_dtype = get_value_from_list_or_dict(data_types, input_name, input_id)
- if user_dtype is not None and example_dtype is not None and example_dtype != user_dtype:
- raise Error(
- f"Defined input type {user_dtype} is not equal to provided example_input type {example_dtype}")
-
- data_rank = getattr(example_input, "ndim", 0)
- user_input_shape = get_value_from_list_or_dict(input_shapes, input_name, input_id)
- if user_input_shape.rank.is_static and user_input_shape.rank.get_length() != data_rank:
- raise Error(
- f"Requested input shape {user_input_shape.rank.get_length()} rank"
- f" is not equal to provided example_input rank {data_rank}")
-
- input_shape = user_input_shape if user_input_shape is not None else PartialShape([-1] * data_rank)
- update_list_or_dict(data_types, input_name, input_id,
- example_dtype if example_dtype is not None else None)
- update_list_or_dict(input_shapes, input_name, input_id, input_shape)
- else:
- for input_id, example_input in enumerate(list_inputs):
- dtype = getattr(example_input, "dtype", type(example_input))
- ov_dtype = pt_to_ov_type_map.get(str(dtype))
- data_rank = getattr(example_input, "ndim", 0)
- input_shape = PartialShape([-1] * data_rank)
- input_name = input_names[input_id] if input_names else None
- update_list_or_dict(input_shapes, input_name, input_id, input_shape)
- update_list_or_dict(data_types, input_name, input_id, ov_dtype if ov_dtype is not None else None)
-
- args.placeholder_data_types = data_types
- args.placeholder_shapes = input_shapes
- if not args.input and input_names:
- args.input_list = input_names
- args.input = ",".join(input_names)
+ if input_names and not is_dict_input:
+ example_inputs = dict(zip(input_names, example_inputs))
+ example_inputs = flatten_inputs(example_inputs, input_names)
+ input_arg = []
+ for example in example_inputs:
+ name = None
+ if isinstance(example, tuple) and len(example) == 2:
+ name = example[0]
+ example = example[1]
+ shape = PartialShape([-1] * example.ndim) if hasattr(example, "ndim") else PartialShape.dynamic()
+ dtype = getattr(example, "dtype", type(example))
+ dtype = pt_to_ov_type_map.get(str(dtype))
+ if name:
+ input_arg.append(single_input_to_input_cut_info((name, shape, dtype)))
+ else:
+ input_arg.append(single_input_to_input_cut_info((shape, dtype)))
+ if inputs is not None and len(inputs) != 0:
+ if len(inputs) == len(input_arg):
+ # we can update input argument with info from examples
+ new_input = []
+ for i in range(len(input_arg)):
+ input_desc = args.input[i]
+ name = input_desc.name
+ dtype = input_desc.type
+ shape = input_desc.shape
+ if name is None:
+ name = input_arg[i].name
+ if dtype is None:
+ dtype = input_arg[i].type
+ if shape is None:
+ shape = input_arg[i].shape
+ new_input.append(_InputCutInfo(name, shape, dtype, input_desc.value))
+ input_arg = new_input
+ else:
+ # we can't update args.input
+ return
+ args.input = input_arg


  # pylint: disable=no-member
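
For reference, the behavior of the flatten_inputs() helper added above can be reproduced standalone. The condensed copy and toy data below are illustrative only: nested containers are flattened in order, and names survive only for values sitting directly in a dict.

    def flatten_inputs(inputs, names=None):
        # condensed re-statement of the helper from the hunk above
        flattened = []
        if isinstance(inputs, dict):
            items = [(n, inputs[n]) for n in names] if names else list(inputs.items())
            for name, value in items:
                if isinstance(value, (list, tuple, dict)):
                    flattened.extend(flatten_inputs(value))  # nested values lose their name
                else:
                    flattened.append((name, value))
            return flattened
        for value in inputs:
            if isinstance(value, (list, tuple, dict)):
                flattened.extend(flatten_inputs(value))
            else:
                flattened.append(value)
        return flattened

    print(flatten_inputs({"x": (1, 2), "y": 3}))  # -> [1, 2, ('y', 3)]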
intel_npu_acceleration_library/external/openvino/tools/ovc/utils.py

@@ -1,11 +1,13 @@
  # Copyright (C) 2018-2024 Intel Corporation
  # SPDX-License-Identifier: Apache-2.0

- import os
- from typing import Iterable, Union
-
+ import importlib.util
+ import logging as log
  import numpy as np
+ import os
+ import sys
  from openvino.tools.ovc.error import Error
+ from typing import Iterable, Union

  try:
  import openvino_telemetry as tm

@@ -13,6 +15,11 @@ try:
  except ImportError:
  import openvino.tools.ovc.telemetry_stub as tm

+ if sys.version_info < (3, 8):
+ import importlib_metadata
+ else:
+ import importlib.metadata as importlib_metadata
+
  dynamic_dimension = np.ma.masked

@@ -107,3 +114,83 @@ def get_ir_version():
  :return: the IR version
  """
  return 11
+
+
+ def import_openvino_tokenizers():
+ # extract openvino version
+ if importlib.util.find_spec("openvino") is None:
+ return False
+ try:
+ from openvino import get_version
+ openvino_version = get_version()
+ openvino_available = True
+ except ImportError:
+ openvino_available = False
+ if not openvino_available:
+ return False
+
+ if importlib.util.find_spec("openvino_tokenizers") is None:
+ return False
+
+ try:
+ pip_metadata_version = importlib_metadata.version("openvino")
+ except importlib_metadata.PackageNotFoundError:
+ pip_metadata_version = False
+ try:
+ pip_metadata_version = importlib_metadata.version("openvino-nightly")
+ is_nightly = True
+ except importlib_metadata.PackageNotFoundError:
+ is_nightly = False
+
+ try:
+ import openvino_tokenizers # pylint: disable=no-name-in-module,import-error
+
+ openvino_tokenizers._get_factory()
+ except RuntimeError:
+ tokenizers_version = openvino_tokenizers.__version__
+
+ if tokenizers_version == "0.0.0.0":
+ try:
+ tokenizers_version = importlib_metadata.version("openvino_tokenizers") or tokenizers_version
+ except importlib_metadata.PackageNotFoundError:
+ pass
+ message = (
+ "OpenVINO and OpenVINO Tokenizers versions are not binary compatible.\n"
+ f"OpenVINO version: {openvino_version}\n"
+ f"OpenVINO Tokenizers version: {tokenizers_version}\n"
+ "First 3 numbers should be the same. Update OpenVINO Tokenizers to compatible version. "
+ )
+ if not pip_metadata_version:
+ message += (
+ "For archive installation of OpenVINO try to build OpenVINO Tokenizers from source: "
+ "https://github.com/openvinotoolkit/openvino_tokenizers/tree/master?tab=readme-ov-file"
+ "#build-and-install-from-source"
+ )
+ if sys.platform == "linux":
+ message += (
+ "\nThe PyPI version of OpenVINO Tokenizers is built on CentOS and may not be compatible with other "
+ "Linux distributions; rebuild OpenVINO Tokenizers from source."
+ )
+ else:
+ message += (
+ "It is recommended to use the same day builds for pre-release version. "
+ "To install both OpenVINO and OpenVINO Tokenizers release version perform:\n"
+ )
+ if is_nightly:
+ message += "pip uninstall -y openvino-nightly && "
+ message += "pip install --force-reinstall openvino openvino-tokenizers\n"
+ if is_nightly:
+ message += (
+ "openvino-nightly package will be deprecated in the future - use pre-release drops instead. "
+ )
+ message += "To update both OpenVINO and OpenVINO Tokenizers to the latest pre-release version perform:\n"
+ if is_nightly:
+ message += "pip uninstall -y openvino-nightly && "
+ message += (
+ "pip install --pre -U openvino openvino-tokenizers "
+ "--extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly"
+ )
+ log.warning(message)
+ return False
+
+ return True
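
A short usage sketch for the helper above (hedged; main.py in the earlier hunk already calls it once at import time): it returns True only when openvino_tokenizers is installed and binary-compatible with the installed OpenVINO, and otherwise logs a warning with upgrade instructions and returns False.

    from openvino.tools.ovc.utils import import_openvino_tokenizers

    if not import_openvino_tokenizers():
        # Tokenizer extensions are not loaded, so conversions that rely on
        # openvino-tokenizers operations will not be available here.
        print("openvino_tokenizers not loaded; continuing without it")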
intel_npu_acceleration_library/external/openvino/utils.py

@@ -6,6 +6,7 @@ import os
  import sys
  from functools import wraps
  from typing import Callable, Any
+ from pathlib import Path


  def _add_openvino_libs_to_search_path() -> None:

@@ -38,6 +39,22 @@ def _add_openvino_libs_to_search_path() -> None:
  os.add_dll_directory(os.path.abspath(lib_path))


+ def get_cmake_path() -> str:
+ """Searches for the directory containing CMake files within the package install directory.
+
+ :return: The path to the directory containing CMake files, if found. Otherwise, returns empty string.
+ :rtype: str
+ """
+ package_path = Path(__file__).parent
+ cmake_file = "OpenVINOConfig.cmake"
+
+ for dirpath, _, filenames in os.walk(package_path):
+ if cmake_file in filenames:
+ return dirpath
+
+ return ""
+
+
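
A brief usage sketch for the new get_cmake_path() helper above, assuming it is imported from openvino.utils, the module this hunk modifies; the OpenVINO_DIR variable is standard CMake usage, not something this diff sets up. It returns the directory inside the installed package that contains OpenVINOConfig.cmake, or an empty string if none is bundled.

    from openvino.utils import get_cmake_path

    cmake_dir = get_cmake_path()
    if cmake_dir:
        # e.g. configure a C++ project against the pip-installed OpenVINO:
        #   cmake -DOpenVINO_DIR=<cmake_dir> -S . -B build
        print(f"OpenVINOConfig.cmake found in: {cmake_dir}")
    else:
        print("No CMake config bundled with this OpenVINO installation")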
  def deprecated(name: Any = None, version: str = "", message: str = "", stacklevel: int = 2) -> Callable[..., Any]:
  """Prints deprecation warning "{function_name} is deprecated and will be removed in version {version}. {message}" and runs the function.

intel_npu_acceleration_library/nn/module.py

@@ -67,23 +67,23 @@ def compute_input_signature(
  return "_".join(signature)


- def patch_parameters(module: torch.nn.Module, model: NNFactory, recurse: bool = False):
- """Patch the parameters of a PyTorch module with constants.
-
- Args:
- module (torch.nn.Module): The PyTorch module.
- model (NNFactory): The NNFactory instance.
- recurse (bool, optional): Recurse over all submodules. Defaults to False.
- """
- elements = list(module.named_parameters(recurse=recurse))
- for name, param in elements:
- del module._parameters[name]
- setattr(module, name, model.constant(param.data.detach().numpy()))
-
- buffers = list(module.named_buffers(recurse=recurse))
- for name, param in buffers:
- del module._buffers[name]
- setattr(module, name, model.constant(param.data.detach().numpy()))
+ # def patch_parameters(module: torch.nn.Module, model: NNFactory, recurse: bool = False):
+ # """Patch the parameters of a PyTorch module with constants.
+
+ # Args:
+ # module (torch.nn.Module): The PyTorch module.
+ # model (NNFactory): The NNFactory instance.
+ # recurse (bool, optional): Recurse over all submodules. Defaults to False.
+ # """
+ # elements = list(module.named_parameters(recurse=recurse))
+ # for name, param in elements:
+ # del module._parameters[name]
+ # setattr(module, name, model.constant(param.data.detach().numpy()))
+
+ # buffers = list(module.named_buffers(recurse=recurse))
+ # for name, param in buffers:
+ # del module._buffers[name]
+ # setattr(module, name, model.constant(param.data.detach().numpy()))


  def patch_modules(module: torch.nn.Module, model: NNFactory):