paddlex 3.0.1__py3-none-any.whl → 3.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- paddlex/.version +1 -1
- paddlex/inference/models/common/static_infer.py +18 -14
- paddlex/inference/models/common/ts/funcs.py +19 -8
- paddlex/inference/models/formula_recognition/predictor.py +1 -1
- paddlex/inference/models/formula_recognition/processors.py +2 -2
- paddlex/inference/models/text_recognition/result.py +1 -1
- paddlex/inference/pipelines/layout_parsing/layout_objects.py +859 -0
- paddlex/inference/pipelines/layout_parsing/pipeline_v2.py +144 -205
- paddlex/inference/pipelines/layout_parsing/result_v2.py +6 -270
- paddlex/inference/pipelines/layout_parsing/setting.py +1 -0
- paddlex/inference/pipelines/layout_parsing/utils.py +108 -312
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/utils.py +302 -247
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/xycuts.py +156 -104
- paddlex/inference/pipelines/ocr/result.py +2 -2
- paddlex/inference/pipelines/pp_chatocr/pipeline_v4.py +1 -1
- paddlex/inference/serving/basic_serving/_app.py +46 -13
- paddlex/inference/utils/hpi.py +23 -16
- paddlex/inference/utils/hpi_model_info_collection.json +627 -202
- paddlex/inference/utils/misc.py +20 -0
- paddlex/inference/utils/mkldnn_blocklist.py +36 -2
- paddlex/inference/utils/official_models.py +126 -5
- paddlex/inference/utils/pp_option.py +48 -4
- paddlex/modules/semantic_segmentation/dataset_checker/__init__.py +12 -2
- paddlex/ops/__init__.py +6 -3
- paddlex/utils/deps.py +2 -2
- paddlex/utils/device.py +4 -19
- paddlex/utils/flags.py +9 -0
- paddlex/utils/subclass_register.py +2 -2
- {paddlex-3.0.1.dist-info → paddlex-3.0.2.dist-info}/METADATA +307 -162
- {paddlex-3.0.1.dist-info → paddlex-3.0.2.dist-info}/RECORD +34 -32
- {paddlex-3.0.1.dist-info → paddlex-3.0.2.dist-info}/WHEEL +1 -1
- {paddlex-3.0.1.dist-info → paddlex-3.0.2.dist-info}/entry_points.txt +1 -0
- {paddlex-3.0.1.dist-info/licenses → paddlex-3.0.2.dist-info}/LICENSE +0 -0
- {paddlex-3.0.1.dist-info → paddlex-3.0.2.dist-info}/top_level.txt +0 -0
paddlex/inference/utils/hpi.py
CHANGED
@@ -17,6 +17,7 @@ import importlib.resources
|
|
17
17
|
import importlib.util
|
18
18
|
import json
|
19
19
|
import platform
|
20
|
+
from collections import defaultdict
|
20
21
|
from functools import lru_cache
|
21
22
|
from typing import Any, Dict, List, Literal, Optional, Tuple, Union
|
22
23
|
|
@@ -30,6 +31,7 @@ from ...utils.env import (
|
|
30
31
|
get_paddle_version,
|
31
32
|
)
|
32
33
|
from ...utils.flags import USE_PIR_TRT
|
34
|
+
from .misc import is_mkldnn_available
|
33
35
|
from .model_paths import ModelPaths
|
34
36
|
|
35
37
|
|
@@ -186,24 +188,23 @@ def suggest_inference_backend_and_config(
|
|
186
188
|
hpi_config.pdx_model_name
|
187
189
|
].copy()
|
188
190
|
|
191
|
+
if not is_mkldnn_available():
|
192
|
+
if "paddle_mkldnn" in supported_pseudo_backends:
|
193
|
+
supported_pseudo_backends.remove("paddle_mkldnn")
|
194
|
+
|
189
195
|
# XXX
|
190
196
|
if not (
|
191
197
|
USE_PIR_TRT
|
192
198
|
and importlib.util.find_spec("tensorrt")
|
193
199
|
and ctypes.util.find_library("nvinfer")
|
194
200
|
):
|
195
|
-
if (
|
196
|
-
"paddle_tensorrt" in supported_pseudo_backends
|
197
|
-
or "paddle_tensorrt_fp16" in supported_pseudo_backends
|
198
|
-
):
|
199
|
-
supported_pseudo_backends.append("paddle")
|
200
201
|
if "paddle_tensorrt" in supported_pseudo_backends:
|
201
202
|
supported_pseudo_backends.remove("paddle_tensorrt")
|
202
203
|
if "paddle_tensorrt_fp16" in supported_pseudo_backends:
|
203
204
|
supported_pseudo_backends.remove("paddle_tensorrt_fp16")
|
204
205
|
|
205
|
-
|
206
|
-
|
206
|
+
supported_backends = []
|
207
|
+
backend_to_pseudo_backends = defaultdict(list)
|
207
208
|
for pb in supported_pseudo_backends:
|
208
209
|
if pb.startswith("paddle"):
|
209
210
|
backend = "paddle"
|
@@ -213,41 +214,47 @@ def suggest_inference_backend_and_config(
|
|
213
214
|
backend = pb
|
214
215
|
if available_backends is not None and backend not in available_backends:
|
215
216
|
continue
|
216
|
-
|
217
|
-
|
217
|
+
supported_backends.append(backend)
|
218
|
+
backend_to_pseudo_backends[backend].append(pb)
|
218
219
|
|
219
|
-
if not backend_to_pseudo_backend:
|
220
|
+
if not supported_backends:
|
220
221
|
return None, "No inference backend can be selected."
|
221
222
|
|
222
223
|
if hpi_config.backend is not None:
|
223
|
-
if hpi_config.backend not in backend_to_pseudo_backend:
|
224
|
+
if hpi_config.backend not in supported_backends:
|
224
225
|
return (
|
225
226
|
None,
|
226
227
|
f"{repr(hpi_config.backend)} is not a supported inference backend.",
|
227
228
|
)
|
228
229
|
suggested_backend = hpi_config.backend
|
230
|
+
pseudo_backends = backend_to_pseudo_backends[suggested_backend]
|
231
|
+
pseudo_backend = pseudo_backends[0]
|
229
232
|
else:
|
230
|
-
#
|
231
|
-
suggested_backend = list(backend_to_pseudo_backend.keys())[0]
|
233
|
+
# Prefer the first one.
|
234
|
+
suggested_backend = supported_backends[0]
|
235
|
+
pseudo_backend = supported_pseudo_backends[0]
|
232
236
|
|
233
237
|
suggested_backend_config = {}
|
234
238
|
if suggested_backend == "paddle":
|
235
|
-
pseudo_backend = backend_to_pseudo_backend["paddle"]
|
236
239
|
assert pseudo_backend in (
|
237
240
|
"paddle",
|
238
241
|
"paddle_fp16",
|
242
|
+
"paddle_mkldnn",
|
239
243
|
"paddle_tensorrt",
|
240
244
|
"paddle_tensorrt_fp16",
|
241
245
|
), pseudo_backend
|
242
|
-
if pseudo_backend == "paddle_fp16":
|
246
|
+
if pseudo_backend == "paddle":
|
247
|
+
suggested_backend_config.update({"run_mode": "paddle"})
|
248
|
+
elif pseudo_backend == "paddle_fp16":
|
243
249
|
suggested_backend_config.update({"run_mode": "paddle_fp16"})
|
250
|
+
elif pseudo_backend == "paddle_mkldnn":
|
251
|
+
suggested_backend_config.update({"run_mode": "mkldnn"})
|
244
252
|
elif pseudo_backend == "paddle_tensorrt":
|
245
253
|
suggested_backend_config.update({"run_mode": "trt_fp32"})
|
246
254
|
elif pseudo_backend == "paddle_tensorrt_fp16":
|
247
255
|
# TODO: Check if the target device supports FP16.
|
248
256
|
suggested_backend_config.update({"run_mode": "trt_fp16"})
|
249
257
|
elif suggested_backend == "tensorrt":
|
250
|
-
pseudo_backend = backend_to_pseudo_backend["tensorrt"]
|
251
258
|
assert pseudo_backend in ("tensorrt", "tensorrt_fp16"), pseudo_backend
|
252
259
|
if pseudo_backend == "tensorrt_fp16":
|
253
260
|
suggested_backend_config.update({"precision": "fp16"})
|