paddlex-3.0.1-py3-none-any.whl → paddlex-3.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. paddlex/.version +1 -1
  2. paddlex/inference/models/base/predictor/base_predictor.py +2 -0
  3. paddlex/inference/models/common/static_infer.py +20 -14
  4. paddlex/inference/models/common/ts/funcs.py +19 -8
  5. paddlex/inference/models/formula_recognition/predictor.py +1 -1
  6. paddlex/inference/models/formula_recognition/processors.py +2 -2
  7. paddlex/inference/models/text_recognition/result.py +1 -1
  8. paddlex/inference/pipelines/layout_parsing/layout_objects.py +859 -0
  9. paddlex/inference/pipelines/layout_parsing/pipeline_v2.py +144 -205
  10. paddlex/inference/pipelines/layout_parsing/result_v2.py +13 -272
  11. paddlex/inference/pipelines/layout_parsing/setting.py +1 -0
  12. paddlex/inference/pipelines/layout_parsing/utils.py +108 -312
  13. paddlex/inference/pipelines/layout_parsing/xycut_enhanced/utils.py +302 -247
  14. paddlex/inference/pipelines/layout_parsing/xycut_enhanced/xycuts.py +156 -104
  15. paddlex/inference/pipelines/ocr/result.py +2 -2
  16. paddlex/inference/pipelines/pp_chatocr/pipeline_v4.py +1 -1
  17. paddlex/inference/serving/basic_serving/_app.py +47 -13
  18. paddlex/inference/serving/infra/utils.py +22 -17
  19. paddlex/inference/utils/hpi.py +60 -25
  20. paddlex/inference/utils/hpi_model_info_collection.json +627 -204
  21. paddlex/inference/utils/misc.py +20 -0
  22. paddlex/inference/utils/mkldnn_blocklist.py +36 -2
  23. paddlex/inference/utils/official_models.py +126 -5
  24. paddlex/inference/utils/pp_option.py +81 -21
  25. paddlex/modules/semantic_segmentation/dataset_checker/__init__.py +12 -2
  26. paddlex/ops/__init__.py +6 -3
  27. paddlex/utils/deps.py +2 -2
  28. paddlex/utils/device.py +4 -19
  29. paddlex/utils/download.py +10 -7
  30. paddlex/utils/flags.py +9 -0
  31. paddlex/utils/subclass_register.py +2 -2
  32. {paddlex-3.0.1.dist-info → paddlex-3.0.3.dist-info}/METADATA +307 -162
  33. {paddlex-3.0.1.dist-info → paddlex-3.0.3.dist-info}/RECORD +37 -35
  34. {paddlex-3.0.1.dist-info → paddlex-3.0.3.dist-info}/WHEEL +1 -1
  35. {paddlex-3.0.1.dist-info → paddlex-3.0.3.dist-info}/entry_points.txt +1 -0
  36. {paddlex-3.0.1.dist-info/licenses → paddlex-3.0.3.dist-info}/LICENSE +0 -0
  37. {paddlex-3.0.1.dist-info → paddlex-3.0.3.dist-info}/top_level.txt +0 -0
paddlex/inference/utils/hpi.py (item 19 above)

@@ -17,6 +17,7 @@ import importlib.resources
 import importlib.util
 import json
 import platform
+from collections import defaultdict
 from functools import lru_cache
 from typing import Any, Dict, List, Literal, Optional, Tuple, Union

@@ -30,6 +31,7 @@ from ...utils.env import (
     get_paddle_version,
 )
 from ...utils.flags import USE_PIR_TRT
+from .misc import is_mkldnn_available
 from .model_paths import ModelPaths


@@ -130,13 +132,25 @@ def suggest_inference_backend_and_config(
     available_backends = []
     if "paddle" in model_paths:
         available_backends.append("paddle")
-    if is_built_with_openvino() and is_onnx_model_available:
+    if (
+        is_built_with_openvino()
+        and is_onnx_model_available
+        and hpi_config.device_type == "cpu"
+    ):
         available_backends.append("openvino")
-    if is_built_with_ort() and is_onnx_model_available:
+    if (
+        is_built_with_ort()
+        and is_onnx_model_available
+        and hpi_config.device_type in ("cpu", "gpu")
+    ):
         available_backends.append("onnxruntime")
-    if is_built_with_trt() and is_onnx_model_available:
+    if (
+        is_built_with_trt()
+        and is_onnx_model_available
+        and hpi_config.device_type == "gpu"
+    ):
         available_backends.append("tensorrt")
-    if is_built_with_om() and "om" in model_paths:
+    if is_built_with_om() and "om" in model_paths and hpi_config.device_type == "npu":
         available_backends.append("om")

     if not available_backends:
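
The hunk above is one of the core 3.0.3 changes: each candidate backend is now additionally gated on hpi_config.device_type, so openvino is offered only on CPU, tensorrt only on GPU, and om only on NPU. A minimal, self-contained sketch of that gating logic (the table, function name, and parameters here are illustrative, not the PaddleX API):

    # Illustrative device-support table; "om" needs an OM model rather than
    # an ONNX model, which this sketch folds into a required-format lookup.
    BACKEND_DEVICE_SUPPORT = {
        "openvino": {"cpu"},
        "onnxruntime": {"cpu", "gpu"},
        "tensorrt": {"gpu"},
        "om": {"npu"},
    }

    def backends_for(device_type, built_backends, model_formats):
        # Mirrors the gating above: a backend is available only if the
        # binary was built with it, the required model format exists, and
        # the device type matches.
        available = []
        if "paddle" in model_formats:
            available.append("paddle")
        for backend, devices in BACKEND_DEVICE_SUPPORT.items():
            required_format = "om" if backend == "om" else "onnx"
            if (
                backend in built_backends
                and required_format in model_formats
                and device_type in devices
            ):
                available.append(backend)
        return available

    print(backends_for("cpu", {"openvino", "onnxruntime", "tensorrt"}, {"paddle", "onnx"}))
    # -> ['paddle', 'openvino', 'onnxruntime']

Restricting each backend to the devices it can actually target avoids suggesting a backend that cannot run on the selected device.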
@@ -186,24 +200,24 @@ def suggest_inference_backend_and_config(
             hpi_config.pdx_model_name
         ].copy()

+        if not (is_mkldnn_available() and hpi_config.device_type == "cpu"):
+            for pb in supported_pseudo_backends[:]:
+                if pb.startswith("paddle_mkldnn"):
+                    supported_pseudo_backends.remove(pb)
+
         # XXX
         if not (
             USE_PIR_TRT
             and importlib.util.find_spec("tensorrt")
             and ctypes.util.find_library("nvinfer")
+            and hpi_config.device_type == "gpu"
         ):
-            if (
-                "paddle_tensorrt" in supported_pseudo_backends
-                or "paddle_tensorrt_fp16" in supported_pseudo_backends
-            ):
-                supported_pseudo_backends.append("paddle")
-            if "paddle_tensorrt" in supported_pseudo_backends:
-                supported_pseudo_backends.remove("paddle_tensorrt")
-            if "paddle_tensorrt_fp16" in supported_pseudo_backends:
-                supported_pseudo_backends.remove("paddle_tensorrt_fp16")
-
-    candidate_backends = []
-    backend_to_pseudo_backend = {}
+            for pb in supported_pseudo_backends[:]:
+                if pb.startswith("paddle_tensorrt"):
+                    supported_pseudo_backends.remove(pb)
+
+    supported_backends = []
+    backend_to_pseudo_backends = defaultdict(list)
     for pb in supported_pseudo_backends:
         if pb.startswith("paddle"):
             backend = "paddle"
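
The rename from candidate_backends/backend_to_pseudo_backend to supported_backends/backend_to_pseudo_backends is more than cosmetic: the old plain dict kept a single pseudo backend per backend (a later entry overwrote an earlier one), while the new defaultdict(list) preserves every pseudo backend in preference order. A small sketch with a made-up pseudo-backend list:

    from collections import defaultdict

    # Hypothetical per-model pseudo-backend list, in preference order.
    supported_pseudo_backends = [
        "paddle_mkldnn",
        "paddle",
        "onnxruntime",
        "tensorrt_fp16",
        "tensorrt",
    ]

    backend_to_pseudo_backends = defaultdict(list)
    for pb in supported_pseudo_backends:
        # Same grouping rule as the loop above: paddle_* and tensorrt_*
        # variants collapse onto their base backend.
        if pb.startswith("paddle"):
            backend = "paddle"
        elif pb.startswith("tensorrt"):
            backend = "tensorrt"
        else:
            backend = pb
        backend_to_pseudo_backends[backend].append(pb)

    print(dict(backend_to_pseudo_backends))
    # {'paddle': ['paddle_mkldnn', 'paddle'], 'onnxruntime': ['onnxruntime'],
    #  'tensorrt': ['tensorrt_fp16', 'tensorrt']}

Keeping the full list lets the later selection step honor a requested run_mode while still defaulting to the preferred entry.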
@@ -213,41 +227,62 @@ def suggest_inference_backend_and_config(
             backend = pb
         if available_backends is not None and backend not in available_backends:
             continue
-        candidate_backends.append(backend)
-        backend_to_pseudo_backend[backend] = pb
+        supported_backends.append(backend)
+        backend_to_pseudo_backends[backend].append(pb)

-    if not candidate_backends:
+    if not supported_backends:
         return None, "No inference backend can be selected."

     if hpi_config.backend is not None:
-        if hpi_config.backend not in candidate_backends:
+        if hpi_config.backend not in supported_backends:
             return (
                 None,
                 f"{repr(hpi_config.backend)} is not a supported inference backend.",
             )
         suggested_backend = hpi_config.backend
     else:
-        # The first backend is the preferred one.
-        suggested_backend = candidate_backends[0]
+        # Prefer the first one.
+        suggested_backend = supported_backends[0]
+
+    pseudo_backends = backend_to_pseudo_backends[suggested_backend]
+
+    if hpi_config.backend_config is not None:
+        requested_base_pseudo_backend = None
+        if suggested_backend == "paddle":
+            if "run_mode" in hpi_config.backend_config:
+                if hpi_config.backend_config["run_mode"].startswith("mkldnn"):
+                    requested_base_pseudo_backend = "paddle_mkldnn"
+                elif hpi_config.backend_config["run_mode"].startswith("trt"):
+                    requested_base_pseudo_backend = "paddle_tensorrt"
+        if requested_base_pseudo_backend:
+            for pb in pseudo_backends:
+                if pb.startswith(requested_base_pseudo_backend):
+                    break
+            else:
+                return None, "Unsupported backend configuration."
+    pseudo_backend = pseudo_backends[0]

     suggested_backend_config = {}
     if suggested_backend == "paddle":
-        pseudo_backend = backend_to_pseudo_backend["paddle"]
         assert pseudo_backend in (
             "paddle",
             "paddle_fp16",
+            "paddle_mkldnn",
             "paddle_tensorrt",
             "paddle_tensorrt_fp16",
         ), pseudo_backend
-        if pseudo_backend == "paddle_fp16":
+        if pseudo_backend == "paddle":
+            suggested_backend_config.update({"run_mode": "paddle"})
+        elif pseudo_backend == "paddle_fp16":
             suggested_backend_config.update({"run_mode": "paddle_fp16"})
+        elif pseudo_backend == "paddle_mkldnn":
+            suggested_backend_config.update({"run_mode": "mkldnn"})
         elif pseudo_backend == "paddle_tensorrt":
             suggested_backend_config.update({"run_mode": "trt_fp32"})
         elif pseudo_backend == "paddle_tensorrt_fp16":
             # TODO: Check if the target device supports FP16.
             suggested_backend_config.update({"run_mode": "trt_fp16"})
     elif suggested_backend == "tensorrt":
-        pseudo_backend = backend_to_pseudo_backend["tensorrt"]
         assert pseudo_backend in ("tensorrt", "tensorrt_fp16"), pseudo_backend
         if pseudo_backend == "tensorrt_fp16":
             suggested_backend_config.update({"precision": "fp16"})