paddlex 3.0.0rc1__py3-none-any.whl → 3.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- paddlex/.version +1 -1
- paddlex/__init__.py +1 -1
- paddlex/configs/modules/chart_parsing/PP-Chart2Table.yaml +13 -0
- paddlex/configs/modules/doc_vlm/PP-DocBee2-3B.yaml +14 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet_plus-L.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet_plus-M.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet_plus-S.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocBlockLayout.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocLayout-L.yaml +2 -2
- paddlex/configs/modules/layout_detection/PP-DocLayout-M.yaml +2 -2
- paddlex/configs/modules/layout_detection/PP-DocLayout-S.yaml +2 -2
- paddlex/configs/modules/layout_detection/PP-DocLayout_plus-L.yaml +40 -0
- paddlex/configs/modules/text_detection/PP-OCRv5_mobile_det.yaml +40 -0
- paddlex/configs/modules/text_detection/PP-OCRv5_server_det.yaml +40 -0
- paddlex/configs/modules/text_recognition/PP-OCRv5_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/PP-OCRv5_server_rec.yaml +39 -0
- paddlex/configs/modules/textline_orientation/PP-LCNet_x1_0_textline_ori.yaml +41 -0
- paddlex/configs/pipelines/OCR.yaml +7 -6
- paddlex/configs/pipelines/PP-ChatOCRv3-doc.yaml +3 -1
- paddlex/configs/pipelines/PP-ChatOCRv4-doc.yaml +91 -34
- paddlex/configs/pipelines/PP-StructureV3.yaml +72 -72
- paddlex/configs/pipelines/doc_understanding.yaml +1 -1
- paddlex/configs/pipelines/formula_recognition.yaml +2 -2
- paddlex/configs/pipelines/layout_parsing.yaml +3 -2
- paddlex/configs/pipelines/seal_recognition.yaml +1 -0
- paddlex/configs/pipelines/table_recognition.yaml +2 -1
- paddlex/configs/pipelines/table_recognition_v2.yaml +7 -1
- paddlex/hpip_links.html +20 -20
- paddlex/inference/common/batch_sampler/doc_vlm_batch_sampler.py +33 -10
- paddlex/inference/common/batch_sampler/image_batch_sampler.py +34 -25
- paddlex/inference/common/result/mixin.py +19 -12
- paddlex/inference/models/base/predictor/base_predictor.py +2 -8
- paddlex/inference/models/common/static_infer.py +29 -73
- paddlex/inference/models/common/tokenizer/__init__.py +2 -0
- paddlex/inference/models/common/tokenizer/clip_tokenizer.py +1 -1
- paddlex/inference/models/common/tokenizer/gpt_tokenizer.py +2 -2
- paddlex/inference/models/common/tokenizer/qwen2_5_tokenizer.py +112 -0
- paddlex/inference/models/common/tokenizer/qwen2_tokenizer.py +7 -1
- paddlex/inference/models/common/tokenizer/qwen_tokenizer.py +288 -0
- paddlex/inference/models/common/tokenizer/tokenizer_utils.py +13 -13
- paddlex/inference/models/common/tokenizer/tokenizer_utils_base.py +3 -3
- paddlex/inference/models/common/tokenizer/vocab.py +7 -7
- paddlex/inference/models/common/ts/funcs.py +19 -8
- paddlex/inference/models/common/vlm/conversion_utils.py +99 -0
- paddlex/inference/models/common/vlm/fusion_ops.py +205 -0
- paddlex/inference/models/common/vlm/generation/configuration_utils.py +1 -1
- paddlex/inference/models/common/vlm/generation/logits_process.py +1 -1
- paddlex/inference/models/common/vlm/generation/utils.py +1 -1
- paddlex/inference/models/common/vlm/transformers/configuration_utils.py +3 -3
- paddlex/inference/models/common/vlm/transformers/conversion_utils.py +3 -3
- paddlex/inference/models/common/vlm/transformers/model_outputs.py +2 -2
- paddlex/inference/models/common/vlm/transformers/model_utils.py +7 -31
- paddlex/inference/models/doc_vlm/modeling/GOT_ocr_2_0.py +830 -0
- paddlex/inference/models/doc_vlm/modeling/__init__.py +2 -0
- paddlex/inference/models/doc_vlm/modeling/qwen2.py +1606 -0
- paddlex/inference/models/doc_vlm/modeling/qwen2_5_vl.py +3006 -0
- paddlex/inference/models/doc_vlm/modeling/qwen2_vl.py +0 -105
- paddlex/inference/models/doc_vlm/predictor.py +79 -24
- paddlex/inference/models/doc_vlm/processors/GOT_ocr_2_0.py +97 -0
- paddlex/inference/models/doc_vlm/processors/__init__.py +2 -0
- paddlex/inference/models/doc_vlm/processors/common.py +189 -0
- paddlex/inference/models/doc_vlm/processors/qwen2_5_vl.py +548 -0
- paddlex/inference/models/doc_vlm/processors/qwen2_vl.py +21 -176
- paddlex/inference/models/formula_recognition/predictor.py +8 -2
- paddlex/inference/models/formula_recognition/processors.py +90 -77
- paddlex/inference/models/formula_recognition/result.py +28 -27
- paddlex/inference/models/image_feature/processors.py +3 -4
- paddlex/inference/models/keypoint_detection/predictor.py +3 -0
- paddlex/inference/models/object_detection/predictor.py +2 -0
- paddlex/inference/models/object_detection/processors.py +28 -3
- paddlex/inference/models/object_detection/utils.py +2 -0
- paddlex/inference/models/table_structure_recognition/result.py +0 -10
- paddlex/inference/models/text_detection/predictor.py +8 -0
- paddlex/inference/models/text_detection/processors.py +44 -10
- paddlex/inference/models/text_detection/result.py +0 -10
- paddlex/inference/models/text_recognition/result.py +1 -1
- paddlex/inference/pipelines/__init__.py +9 -5
- paddlex/inference/pipelines/_parallel.py +172 -0
- paddlex/inference/pipelines/anomaly_detection/pipeline.py +16 -6
- paddlex/inference/pipelines/attribute_recognition/pipeline.py +11 -1
- paddlex/inference/pipelines/base.py +14 -4
- paddlex/inference/pipelines/components/faisser.py +1 -1
- paddlex/inference/pipelines/doc_preprocessor/pipeline.py +53 -27
- paddlex/inference/pipelines/formula_recognition/pipeline.py +120 -82
- paddlex/inference/pipelines/formula_recognition/result.py +1 -11
- paddlex/inference/pipelines/image_classification/pipeline.py +16 -6
- paddlex/inference/pipelines/image_multilabel_classification/pipeline.py +16 -6
- paddlex/inference/pipelines/instance_segmentation/pipeline.py +16 -6
- paddlex/inference/pipelines/keypoint_detection/pipeline.py +16 -6
- paddlex/inference/pipelines/layout_parsing/layout_objects.py +859 -0
- paddlex/inference/pipelines/layout_parsing/pipeline.py +34 -47
- paddlex/inference/pipelines/layout_parsing/pipeline_v2.py +832 -260
- paddlex/inference/pipelines/layout_parsing/result.py +4 -17
- paddlex/inference/pipelines/layout_parsing/result_v2.py +259 -245
- paddlex/inference/pipelines/layout_parsing/setting.py +88 -0
- paddlex/inference/pipelines/layout_parsing/utils.py +391 -2028
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/__init__.py +16 -0
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/utils.py +1199 -0
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/xycuts.py +615 -0
- paddlex/inference/pipelines/m_3d_bev_detection/pipeline.py +2 -2
- paddlex/inference/pipelines/multilingual_speech_recognition/pipeline.py +2 -2
- paddlex/inference/pipelines/object_detection/pipeline.py +16 -6
- paddlex/inference/pipelines/ocr/pipeline.py +127 -70
- paddlex/inference/pipelines/ocr/result.py +21 -18
- paddlex/inference/pipelines/open_vocabulary_detection/pipeline.py +2 -2
- paddlex/inference/pipelines/open_vocabulary_segmentation/pipeline.py +2 -2
- paddlex/inference/pipelines/pp_chatocr/pipeline_base.py +2 -2
- paddlex/inference/pipelines/pp_chatocr/pipeline_v3.py +2 -5
- paddlex/inference/pipelines/pp_chatocr/pipeline_v4.py +6 -6
- paddlex/inference/pipelines/rotated_object_detection/pipeline.py +16 -6
- paddlex/inference/pipelines/seal_recognition/pipeline.py +109 -53
- paddlex/inference/pipelines/semantic_segmentation/pipeline.py +16 -6
- paddlex/inference/pipelines/small_object_detection/pipeline.py +16 -6
- paddlex/inference/pipelines/table_recognition/pipeline.py +26 -18
- paddlex/inference/pipelines/table_recognition/pipeline_v2.py +624 -53
- paddlex/inference/pipelines/table_recognition/result.py +1 -1
- paddlex/inference/pipelines/table_recognition/table_recognition_post_processing_v2.py +9 -5
- paddlex/inference/pipelines/ts_anomaly_detection/pipeline.py +2 -2
- paddlex/inference/pipelines/ts_classification/pipeline.py +2 -2
- paddlex/inference/pipelines/ts_forecasting/pipeline.py +2 -2
- paddlex/inference/pipelines/video_classification/pipeline.py +2 -2
- paddlex/inference/pipelines/video_detection/pipeline.py +2 -2
- paddlex/inference/serving/basic_serving/_app.py +46 -13
- paddlex/inference/serving/basic_serving/_pipeline_apps/_common/common.py +5 -1
- paddlex/inference/serving/basic_serving/_pipeline_apps/layout_parsing.py +0 -1
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv3_doc.py +0 -1
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv4_doc.py +1 -1
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_structurev3.py +6 -2
- paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition.py +1 -5
- paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition_v2.py +4 -5
- paddlex/inference/serving/infra/utils.py +20 -22
- paddlex/inference/serving/schemas/formula_recognition.py +1 -1
- paddlex/inference/serving/schemas/layout_parsing.py +1 -2
- paddlex/inference/serving/schemas/pp_chatocrv3_doc.py +1 -2
- paddlex/inference/serving/schemas/pp_chatocrv4_doc.py +2 -2
- paddlex/inference/serving/schemas/pp_structurev3.py +10 -6
- paddlex/inference/serving/schemas/seal_recognition.py +1 -1
- paddlex/inference/serving/schemas/table_recognition.py +2 -6
- paddlex/inference/serving/schemas/table_recognition_v2.py +5 -6
- paddlex/inference/utils/hpi.py +30 -16
- paddlex/inference/utils/hpi_model_info_collection.json +666 -162
- paddlex/inference/utils/io/readers.py +12 -12
- paddlex/inference/utils/misc.py +20 -0
- paddlex/inference/utils/mkldnn_blocklist.py +59 -0
- paddlex/inference/utils/official_models.py +140 -5
- paddlex/inference/utils/pp_option.py +74 -9
- paddlex/model.py +2 -2
- paddlex/modules/__init__.py +1 -1
- paddlex/modules/anomaly_detection/evaluator.py +2 -2
- paddlex/modules/base/__init__.py +1 -1
- paddlex/modules/base/evaluator.py +5 -5
- paddlex/modules/base/trainer.py +1 -1
- paddlex/modules/doc_vlm/dataset_checker.py +2 -2
- paddlex/modules/doc_vlm/evaluator.py +2 -2
- paddlex/modules/doc_vlm/exportor.py +2 -2
- paddlex/modules/doc_vlm/model_list.py +1 -1
- paddlex/modules/doc_vlm/trainer.py +2 -2
- paddlex/modules/face_recognition/evaluator.py +2 -2
- paddlex/modules/formula_recognition/evaluator.py +5 -2
- paddlex/modules/formula_recognition/model_list.py +3 -0
- paddlex/modules/formula_recognition/trainer.py +3 -0
- paddlex/modules/general_recognition/evaluator.py +1 -1
- paddlex/modules/image_classification/evaluator.py +2 -2
- paddlex/modules/image_classification/model_list.py +1 -0
- paddlex/modules/instance_segmentation/evaluator.py +1 -1
- paddlex/modules/keypoint_detection/evaluator.py +1 -1
- paddlex/modules/m_3d_bev_detection/evaluator.py +2 -2
- paddlex/modules/multilabel_classification/evaluator.py +2 -2
- paddlex/modules/object_detection/dataset_checker/dataset_src/convert_dataset.py +4 -4
- paddlex/modules/object_detection/evaluator.py +2 -2
- paddlex/modules/object_detection/model_list.py +2 -0
- paddlex/modules/semantic_segmentation/dataset_checker/__init__.py +12 -2
- paddlex/modules/semantic_segmentation/evaluator.py +2 -2
- paddlex/modules/table_recognition/evaluator.py +2 -2
- paddlex/modules/text_detection/evaluator.py +2 -2
- paddlex/modules/text_detection/model_list.py +2 -0
- paddlex/modules/text_recognition/evaluator.py +2 -2
- paddlex/modules/text_recognition/model_list.py +2 -0
- paddlex/modules/ts_anomaly_detection/evaluator.py +2 -2
- paddlex/modules/ts_classification/dataset_checker/dataset_src/split_dataset.py +1 -1
- paddlex/modules/ts_classification/evaluator.py +2 -2
- paddlex/modules/ts_forecast/evaluator.py +2 -2
- paddlex/modules/video_classification/evaluator.py +2 -2
- paddlex/modules/video_detection/evaluator.py +2 -2
- paddlex/ops/__init__.py +8 -5
- paddlex/paddlex_cli.py +19 -13
- paddlex/repo_apis/Paddle3D_api/bev_fusion/model.py +2 -2
- paddlex/repo_apis/PaddleClas_api/cls/config.py +1 -1
- paddlex/repo_apis/PaddleClas_api/cls/model.py +1 -1
- paddlex/repo_apis/PaddleClas_api/cls/register.py +10 -0
- paddlex/repo_apis/PaddleClas_api/cls/runner.py +1 -1
- paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py +1 -1
- paddlex/repo_apis/PaddleDetection_api/instance_seg/runner.py +1 -1
- paddlex/repo_apis/PaddleDetection_api/object_det/config.py +1 -1
- paddlex/repo_apis/PaddleDetection_api/object_det/model.py +1 -1
- paddlex/repo_apis/PaddleDetection_api/object_det/official_categories.py +25 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/register.py +30 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/runner.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/formula_rec/config.py +3 -3
- paddlex/repo_apis/PaddleOCR_api/formula_rec/model.py +5 -9
- paddlex/repo_apis/PaddleOCR_api/formula_rec/register.py +27 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/runner.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/table_rec/model.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/table_rec/runner.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/text_det/model.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/text_det/register.py +18 -0
- paddlex/repo_apis/PaddleOCR_api/text_det/runner.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/text_rec/config.py +3 -3
- paddlex/repo_apis/PaddleOCR_api/text_rec/model.py +5 -9
- paddlex/repo_apis/PaddleOCR_api/text_rec/register.py +18 -0
- paddlex/repo_apis/PaddleOCR_api/text_rec/runner.py +1 -1
- paddlex/repo_apis/PaddleSeg_api/seg/model.py +1 -1
- paddlex/repo_apis/PaddleSeg_api/seg/runner.py +1 -1
- paddlex/repo_apis/PaddleTS_api/ts_ad/config.py +3 -3
- paddlex/repo_apis/PaddleTS_api/ts_cls/config.py +2 -2
- paddlex/repo_apis/PaddleTS_api/ts_fc/config.py +4 -4
- paddlex/repo_apis/PaddleVideo_api/video_cls/config.py +1 -1
- paddlex/repo_apis/PaddleVideo_api/video_cls/model.py +1 -1
- paddlex/repo_apis/PaddleVideo_api/video_cls/runner.py +1 -1
- paddlex/repo_apis/PaddleVideo_api/video_det/config.py +1 -1
- paddlex/repo_apis/PaddleVideo_api/video_det/model.py +1 -1
- paddlex/repo_apis/PaddleVideo_api/video_det/runner.py +1 -1
- paddlex/repo_apis/base/config.py +1 -1
- paddlex/repo_manager/core.py +3 -3
- paddlex/repo_manager/meta.py +6 -2
- paddlex/repo_manager/repo.py +17 -16
- paddlex/utils/custom_device_list.py +26 -2
- paddlex/utils/deps.py +3 -3
- paddlex/utils/device.py +5 -13
- paddlex/utils/env.py +4 -0
- paddlex/utils/flags.py +11 -4
- paddlex/utils/fonts/__init__.py +34 -4
- paddlex/utils/misc.py +1 -1
- paddlex/utils/subclass_register.py +2 -2
- {paddlex-3.0.0rc1.dist-info → paddlex-3.0.2.dist-info}/METADATA +349 -208
- {paddlex-3.0.0rc1.dist-info → paddlex-3.0.2.dist-info}/RECORD +240 -211
- {paddlex-3.0.0rc1.dist-info → paddlex-3.0.2.dist-info}/WHEEL +1 -1
- {paddlex-3.0.0rc1.dist-info → paddlex-3.0.2.dist-info}/entry_points.txt +1 -0
- {paddlex-3.0.0rc1.dist-info/licenses → paddlex-3.0.2.dist-info}/LICENSE +0 -0
- {paddlex-3.0.0rc1.dist-info → paddlex-3.0.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,615 @@
|
|
1
|
+
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
from copy import deepcopy
|
16
|
+
from typing import Dict, List, Tuple
|
17
|
+
|
18
|
+
import numpy as np
|
19
|
+
|
20
|
+
from ..layout_objects import LayoutBlock, LayoutRegion
|
21
|
+
from ..setting import BLOCK_LABEL_MAP, XYCUT_SETTINGS
|
22
|
+
from ..utils import calculate_overlap_ratio, calculate_projection_overlap_ratio
|
23
|
+
from .utils import (
|
24
|
+
calculate_discontinuous_projection,
|
25
|
+
euclidean_insert,
|
26
|
+
find_local_minima_flat_regions,
|
27
|
+
get_blocks_by_direction_interval,
|
28
|
+
get_cut_blocks,
|
29
|
+
insert_child_blocks,
|
30
|
+
manhattan_insert,
|
31
|
+
projection_by_bboxes,
|
32
|
+
recursive_xy_cut,
|
33
|
+
recursive_yx_cut,
|
34
|
+
reference_insert,
|
35
|
+
shrink_overlapping_boxes,
|
36
|
+
sort_normal_blocks,
|
37
|
+
update_doc_title_child_blocks,
|
38
|
+
update_paragraph_title_child_blocks,
|
39
|
+
update_region_child_blocks,
|
40
|
+
update_vision_child_blocks,
|
41
|
+
weighted_distance_insert,
|
42
|
+
)
|
43
|
+
|
44
|
+
|
45
|
+
def pre_process(
    region: LayoutRegion,
) -> List:
    """
    Preprocess the layout for sorting purposes.

    This function performs two main tasks:
    1. Pre-cuts the layout to ensure the document is correctly partitioned and sorted.
    2. Matches the blocks with their children (via update_region_label).

    Args:
        region: LayoutParsingRegion, the layout region to be pre-processed.

    Returns:
        List: A list of pre-cut layout block lists (one list per cut segment).
    """
    # Blocks with these order labels are children/decorations and are excluded
    # from the projection analysis and from the cut result.
    mask_labels = [
        "header",
        "unordered",
        "footer",
        "vision_footnote",
        "sub_paragraph_title",
        "doc_title_text",
        "vision_title",
        "sub_region",
    ]
    pre_cut_block_idxes = []
    block_map = region.block_map
    blocks: List[LayoutBlock] = list(block_map.values())
    for block in blocks:
        if block.order_label not in mask_labels:
            # Assign the order label and attach child blocks first; this may
            # move some blocks into mask_labels for later iterations.
            update_region_label(block, region)

            # Tolerance for deciding whether a block is centered in the region,
            # scaled by the block's own size along its reading direction.
            block_direction = block.direction
            if block_direction == "horizontal":
                tolerance_len = block.long_side_length // 5
            else:
                tolerance_len = block.short_side_length // 10

            # Center of the block along the region's primary direction.
            block_center = (
                block.bbox[region.direction_start_index]
                + block.bbox[region.direction_end_index]
            ) / 2
            center_offset = abs(block_center - region.direction_center_coordinate)
            is_centered = center_offset <= tolerance_len
            if is_centered:
                # Centered blocks are candidates for forcing a pre-cut.
                pre_cut_block_idxes.append(block.index)

    pre_cut_list = []
    cut_direction = region.secondary_direction
    cut_coordinates = []
    discontinuous = []
    all_boxes = np.array(
        [block.bbox for block in blocks if block.order_label not in mask_labels]
    )
    if len(all_boxes) == 0:
        # Nothing left to sort once masked blocks are removed.
        return pre_cut_list
    if pre_cut_block_idxes:
        # Project all candidate boxes onto the cut direction and find the
        # disjoint intervals; num_list holds the box count per interval.
        discontinuous, num_list = calculate_discontinuous_projection(
            all_boxes, direction=cut_direction, return_num=True
        )
        for idx in pre_cut_block_idxes:
            block = block_map[idx]
            if (
                block.order_label not in mask_labels
                and block.secondary_direction == cut_direction
            ):
                if (
                    block.secondary_direction_start_coordinate,
                    block.secondary_direction_end_coordinate,
                ) in discontinuous:
                    # NOTE: `idx` is re-bound here (shadowing the loop
                    # variable); safe because the loop variable is not used
                    # again below, but worth keeping in mind when editing.
                    idx = discontinuous.index(
                        (
                            block.secondary_direction_start_coordinate,
                            block.secondary_direction_end_coordinate,
                        )
                    )
                    if num_list[idx] == 1:
                        # The centered block occupies its interval alone:
                        # cut at both of its edges.
                        cut_coordinates.append(
                            block.secondary_direction_start_coordinate
                        )
                        cut_coordinates.append(block.secondary_direction_end_coordinate)
    # Boxes used for the single-column check: also exclude vision blocks.
    secondary_check_bboxes = np.array(
        [
            block.bbox
            for block in blocks
            if block.order_label not in mask_labels + ["vision"]
        ]
    )
    # NOTE(review): when secondary_check_bboxes is empty but the first block is
    # a "region", calculate_discontinuous_projection is still called on an
    # empty array — presumably handled inside that helper; confirm.
    if len(secondary_check_bboxes) > 0 or blocks[0].label == "region":
        secondary_discontinuous = calculate_discontinuous_projection(
            secondary_check_bboxes, direction=region.direction
        )
        # Only refine cuts when the layout looks single-column along the
        # primary direction (or when sorting whole regions).
        if len(secondary_discontinuous) == 1 or blocks[0].label == "region":
            if not discontinuous:
                discontinuous = calculate_discontinuous_projection(
                    all_boxes, direction=cut_direction
                )
            current_interval = discontinuous[0]
            pre_cut_coordinates = [
                cood for cood in cut_coordinates if cood < current_interval[1]
            ]
            if not pre_cut_coordinates:
                pre_cut_coordinate = 0
            else:
                pre_cut_coordinate = max(pre_cut_coordinates)
            pre_cut_coordinate = max(current_interval[0], pre_cut_coordinate)
            for interval in discontinuous[1:]:
                # Gap between consecutive projection intervals.
                gap_len = interval[0] - current_interval[1]
                if (
                    gap_len >= region.text_line_height * 3
                    or blocks[0].label == "region"
                ):
                    # A wide gap (>= 3 text lines) is always a cut point.
                    cut_coordinates.append(current_interval[1])
                elif gap_len > region.text_line_height * 1.2:
                    # Medium gap: only cut if the column structure differs
                    # on the two sides of the gap.
                    pre_blocks = get_blocks_by_direction_interval(
                        list(block_map.values()),
                        pre_cut_coordinate,
                        current_interval[1],
                        cut_direction,
                    )
                    post_blocks = get_blocks_by_direction_interval(
                        list(block_map.values()),
                        current_interval[1],
                        interval[1],
                        cut_direction,
                    )
                    pre_bboxes = np.array([block.bbox for block in pre_blocks])
                    post_bboxes = np.array([block.bbox for block in post_blocks])
                    # Project onto the axis orthogonal to the cut direction.
                    projection_index = 1 if cut_direction == "horizontal" else 0
                    pre_projection = projection_by_bboxes(pre_bboxes, projection_index)
                    post_projection = projection_by_bboxes(
                        post_bboxes, projection_index
                    )
                    # Flat minima of the projection histogram mark the gaps
                    # (column gutters) on each side.
                    pre_intervals = find_local_minima_flat_regions(pre_projection)
                    post_intervals = find_local_minima_flat_regions(post_projection)
                    pre_gap_boxes = []
                    if pre_intervals is not None:
                        for start, end in pre_intervals:
                            # Synthesize a degenerate bbox spanning the gap
                            # along the projection axis only.
                            bbox = [0] * 4
                            bbox[projection_index] = start
                            bbox[projection_index + 2] = end
                            pre_gap_boxes.append(bbox)
                    post_gap_boxes = []
                    if post_intervals is not None:
                        for start, end in post_intervals:
                            bbox = [0] * 4
                            bbox[projection_index] = start
                            bbox[projection_index + 2] = end
                            post_gap_boxes.append(bbox)
                    max_gap_boxes_num = max(len(pre_gap_boxes), len(post_gap_boxes))
                    if max_gap_boxes_num > 0:
                        # If gutters above and below the gap do not line up,
                        # the column layout changes across the gap: cut here.
                        discontinuous_intervals = calculate_discontinuous_projection(
                            pre_gap_boxes + post_gap_boxes, direction=region.direction
                        )
                        if len(discontinuous_intervals) != max_gap_boxes_num:
                            pre_cut_coordinate = current_interval[1]
                            cut_coordinates.append(current_interval[1])
                current_interval = interval
    cut_list = get_cut_blocks(blocks, cut_direction, cut_coordinates, mask_labels)
    pre_cut_list.extend(cut_list)
    if region.direction == "vertical":
        # Vertical (right-to-left) reading order reverses the segment order.
        pre_cut_list = pre_cut_list[::-1]

    return pre_cut_list
|
210
|
+
|
211
|
+
|
212
|
+
def update_region_label(
    block: LayoutBlock,
    region: LayoutRegion,
) -> None:
    """
    Assign an order label to a block based on its layout label, then match
    the block with its child blocks where applicable.

    Args:
        block (LayoutBlock): The block whose ``order_label`` is set in place.
        region (LayoutRegion): The layout region the block belongs to.

    Returns:
        None
    """
    label = block.label
    if label in BLOCK_LABEL_MAP["header_labels"]:
        block.order_label = "header"
    elif label in BLOCK_LABEL_MAP["doc_title_labels"]:
        block.order_label = "doc_title"
    elif (
        label in BLOCK_LABEL_MAP["paragraph_title_labels"]
        and block.order_label is None
    ):
        block.order_label = "paragraph_title"
    elif label in BLOCK_LABEL_MAP["vision_labels"]:
        block.order_label = "vision"
        # Vision blocks count as a single line and inherit the region's
        # reading direction.
        block.num_of_lines = 1
        block.update_direction(region.direction)
    elif label in BLOCK_LABEL_MAP["footer_labels"]:
        block.order_label = "footer"
    elif label in BLOCK_LABEL_MAP["unordered_labels"]:
        block.order_label = "unordered"
    elif label == "region":
        block.order_label = "region"
    else:
        block.order_label = "normal_text"

    # Only vision, doc title, paragraph title, and region blocks may own
    # child blocks; everything else is done at this point.
    child_matchers = {
        "doc_title": update_doc_title_child_blocks,
        "paragraph_title": update_paragraph_title_child_blocks,
        "vision": update_vision_child_blocks,
        "region": update_region_child_blocks,
    }
    matcher = child_matchers.get(block.order_label)
    if matcher is not None:
        matcher(block, region)
|
264
|
+
|
265
|
+
|
266
|
+
def get_layout_structure(
    blocks: List[LayoutBlock],
    region_direction: str,
    region_secondary_direction: str,
) -> None:
    """
    Determine which blocks cross layout columns, marking them in place.

    Blocks that overlap or span multiple columns get their ``order_label``
    set to "cross_layout" (or "cross_reference" for reference blocks) so
    that the XY-cut sort can handle them separately. The block list is
    mutated; nothing is returned.

    Args:
        blocks (List[LayoutBlock]): Blocks to analyze; sorted in place by
            (left x, width) before analysis.
        region_direction (str): Primary reading direction of the region.
        region_secondary_direction (str): Direction orthogonal to it.

    Returns:
        None
    """
    blocks.sort(
        key=lambda x: (x.bbox[0], x.width),
    )

    # Blocks already classified as crossing (or doc titles) are skipped both
    # as subjects and as references.
    mask_labels = ["doc_title", "cross_layout", "cross_reference"]
    for block_idx, block in enumerate(blocks):
        if block.order_label in mask_labels:
            continue

        for ref_idx, ref_block in enumerate(blocks):
            if block_idx == ref_idx or ref_block.order_label in mask_labels:
                continue

            # Direct area overlap between the two blocks.
            bbox_iou = calculate_overlap_ratio(block.bbox, ref_block.bbox)
            if bbox_iou:
                if ref_block.order_label == "vision":
                    # An overlapped vision block is treated as crossing.
                    ref_block.order_label = "cross_layout"
                    break
                if bbox_iou > 0.1 and block.area < ref_block.area:
                    # Significant overlap: the smaller block is the crosser.
                    block.order_label = "cross_layout"
                    break

            # Overlap of the two blocks' projections onto the primary
            # direction (i.e. they share a column band).
            match_projection_iou = calculate_projection_overlap_ratio(
                block.bbox,
                ref_block.bbox,
                region_direction,
            )
            if match_projection_iou > 0:
                for second_ref_idx, second_ref_block in enumerate(blocks):
                    if (
                        second_ref_idx in [block_idx, ref_idx]
                        or second_ref_block.order_label in mask_labels
                    ):
                        continue

                    bbox_iou = calculate_overlap_ratio(
                        block.bbox, second_ref_block.bbox
                    )
                    if bbox_iou > 0.1:
                        if second_ref_block.order_label == "vision":
                            second_ref_block.order_label = "cross_layout"
                            break
                        if (
                            block.order_label == "vision"
                            or block.area < second_ref_block.area
                        ):
                            block.order_label = "cross_layout"
                            break

                    second_match_projection_iou = calculate_projection_overlap_ratio(
                        block.bbox,
                        second_ref_block.bbox,
                        region_direction,
                    )
                    ref_match_projection_iou = calculate_projection_overlap_ratio(
                        ref_block.bbox,
                        second_ref_block.bbox,
                        region_direction,
                    )
                    secondary_direction_ref_match_projection_overlap_ratio = (
                        calculate_projection_overlap_ratio(
                            ref_block.bbox,
                            second_ref_block.bbox,
                            region_secondary_direction,
                        )
                    )
                    # block spans both references along the primary direction
                    # while the references themselves are side by side
                    # (disjoint on primary, overlapping on secondary):
                    # block straddles two columns.
                    if (
                        second_match_projection_iou > 0
                        and ref_match_projection_iou == 0
                        and secondary_direction_ref_match_projection_overlap_ratio > 0
                    ):
                        # Require both reference text blocks to be long
                        # enough (roughly >= 8 words, per settings) so that
                        # short fragments don't trigger the crossing label.
                        if block.order_label in ["vision", "region"] or (
                            ref_block.order_label == "normal_text"
                            and second_ref_block.order_label == "normal_text"
                            and ref_block.long_side_length
                            > ref_block.text_line_height
                            * XYCUT_SETTINGS.get(
                                "cross_layout_ref_text_block_words_num_threshold", 8
                            )
                            and second_ref_block.long_side_length
                            > second_ref_block.text_line_height
                            * XYCUT_SETTINGS.get(
                                "cross_layout_ref_text_block_words_num_threshold", 8
                            )
                        ):
                            block.order_label = (
                                "cross_reference"
                                if block.label == "reference"
                                else "cross_layout"
                            )
|
371
|
+
|
372
|
+
|
373
|
+
def sort_by_xycut(
    block_bboxes: List,
    direction: str = "vertical",
    min_gap: int = 1,
) -> List[int]:
    """
    Sort bounding boxes using the recursive XY-cut method.

    Args:
        block_bboxes (Union[np.ndarray, List[List[int]]]): Bounding boxes,
            each represented as [x_min, y_min, x_max, y_max].
        direction (str): "vertical" cuts along the Y axis first; any other
            value cuts along the X axis first. Defaults to "vertical".
        min_gap (int): Minimum gap width to consider a separation between
            segments. Defaults to 1.

    Returns:
        List[int]: Indices of the bounding boxes in sorted (reading) order.
    """
    bboxes = np.asarray(block_bboxes).astype(int)
    order: List[int] = []
    # Choose the first cut axis from the requested direction.
    cutter = recursive_yx_cut if direction == "vertical" else recursive_xy_cut
    cutter(
        bboxes,
        np.arange(len(bboxes)).tolist(),
        order,
        min_gap,
    )
    return order
|
409
|
+
|
410
|
+
|
411
|
+
def match_unsorted_blocks(
    sorted_blocks: List[LayoutBlock],
    unsorted_blocks: List[LayoutBlock],
    region: LayoutRegion,
) -> List[LayoutBlock]:
    """
    Insert unsorted (special) blocks into the already-sorted block list.

    Each block is inserted by a strategy chosen from its order label
    (weighted-distance, reference, Manhattan, or Euclidean insertion).

    Args:
        sorted_blocks (List[LayoutBlock]): Blocks already in reading order.
        unsorted_blocks (List[LayoutBlock]): Special blocks to be inserted.
        region (LayoutRegion): Region providing direction and text metrics.

    Returns:
        List[LayoutBlock]: The sorted blocks with the special blocks merged in.
    """
    # Insertion strategy per order label.
    insert_strategy = {
        "cross_layout": weighted_distance_insert,
        "paragraph_title": weighted_distance_insert,
        "doc_title": weighted_distance_insert,
        "vision_title": weighted_distance_insert,
        "vision": weighted_distance_insert,
        "cross_reference": reference_insert,
        "unordered": manhattan_insert,
        "other": manhattan_insert,
        "region": euclidean_insert,
    }

    pending = sort_normal_blocks(
        unsorted_blocks,
        region.text_line_height,
        region.text_line_width,
        region.direction,
    )
    for position, block in enumerate(pending):
        label = "region" if block.label == "region" else block.order_label
        # A doc title that sorts first always leads the result.
        if position == 0 and label == "doc_title":
            sorted_blocks.insert(0, block)
        else:
            sorted_blocks = insert_strategy[label](
                block=block, sorted_blocks=sorted_blocks, region=region
            )
    return sorted_blocks
|
454
|
+
|
455
|
+
|
456
|
+
def xycut_enhanced(
    region: LayoutRegion,
) -> List[LayoutBlock]:
    """
    Order the blocks of a layout region using an enhanced XY-cut algorithm.

    The function performs the following steps:
        1. Pre-process the region's blocks into pre-cut groups, extracting
           header, footer, and unordered blocks.
        2. Sort header, footer, and unordered blocks separately.
        3. For each pre-cut group, run the XY-cut sort on the regular blocks.
        4. Match unsorted special blocks (doc titles, cross-layout blocks,
           region blocks, ...) into the sorted sequence by distance matching.
        5. Re-attach child blocks to their sorted parent blocks.
        6. Return the ordered result list.

    Args:
        region (LayoutRegion): Region whose blocks are to be ordered.

    Returns:
        List[LayoutBlock]: Blocks in reading order (empty list if the region
        contains no blocks).
    """
    if len(region.block_map) == 0:
        return []

    pre_cut_list: List[List[LayoutBlock]] = pre_process(region)
    final_order_res_list: List[LayoutBlock] = []

    header_blocks: List[LayoutBlock] = [
        region.block_map[idx] for idx in region.header_block_idxes
    ]
    unordered_blocks: List[LayoutBlock] = [
        region.block_map[idx] for idx in region.unordered_block_idxes
    ]
    footer_blocks: List[LayoutBlock] = [
        region.block_map[idx] for idx in region.footer_block_idxes
    ]

    header_blocks = sort_normal_blocks(
        header_blocks, region.text_line_height, region.text_line_width, region.direction
    )
    footer_blocks = sort_normal_blocks(
        footer_blocks, region.text_line_height, region.text_line_width, region.direction
    )
    unordered_blocks = sort_normal_blocks(
        unordered_blocks,
        region.text_line_height,
        region.text_line_width,
        region.direction,
    )
    # Headers always come first in the final reading order.
    final_order_res_list.extend(header_blocks)

    unsorted_blocks: List[LayoutBlock] = []
    sorted_blocks_by_pre_cuts: List[LayoutBlock] = []
    for pre_cut_blocks in pre_cut_list:
        sorted_blocks: List[LayoutBlock] = []
        doc_title_blocks: List[LayoutBlock] = []
        xy_cut_blocks: List[LayoutBlock] = []

        if pre_cut_blocks and pre_cut_blocks[0].label == "region":
            # NOTE(review): the original code computed the discontinuous
            # projection here and branched on its length, but both branches
            # executed the identical call below, so the projection result was
            # unused and has been dropped.
            get_layout_structure(
                pre_cut_blocks, region.direction, region.secondary_direction
            )

        # Get xy cut blocks and add other blocks in special_block_map
        for block in pre_cut_blocks:
            if block.order_label not in [
                "cross_layout",
                "cross_reference",
                "doc_title",
                "unordered",
            ]:
                xy_cut_blocks.append(block)
            elif block.label == "doc_title":
                doc_title_blocks.append(block)
            else:
                # Carried across pre-cut groups; matched in later iterations
                # (for "region" blocks) or after the loop.
                unsorted_blocks.append(block)

        if len(xy_cut_blocks) > 0:
            block_bboxes = np.array([block.bbox for block in xy_cut_blocks])
            block_text_lines = [block.num_of_lines for block in xy_cut_blocks]
            discontinuous = calculate_discontinuous_projection(
                block_bboxes, direction=region.direction
            )
            # Work on copies: the bboxes may be mirrored below and shrunk by
            # shrink_overlapping_boxes, and the originals must stay intact.
            blocks_to_sort = deepcopy(xy_cut_blocks)
            if region.direction == "vertical":
                # Negate the x coordinates so the same ascending comparisons
                # implement right-to-left reading order for vertical text.
                for block in blocks_to_sort:
                    block.bbox = np.array(
                        [-block.bbox[0], block.bbox[1], -block.bbox[2], block.bbox[3]]
                    )
            if len(discontinuous) == 1 or max(block_text_lines) == 1:
                # Single projection group (or single-line blocks): bucket by
                # secondary axis in half-line-height steps, then cut along the
                # secondary direction.
                blocks_to_sort.sort(
                    key=lambda x: (
                        x.bbox[region.secondary_direction_start_index]
                        // (region.text_line_height // 2),
                        x.bbox[region.direction_start_index],
                    )
                )
                blocks_to_sort = shrink_overlapping_boxes(
                    blocks_to_sort, region.secondary_direction
                )
                block_bboxes = np.array([block.bbox for block in blocks_to_sort])
                sorted_indexes = sort_by_xycut(
                    block_bboxes, direction=region.secondary_direction, min_gap=1
                )
            else:
                # Multiple projection groups: bucket by primary axis in
                # half-line-width steps, then cut along the primary direction.
                blocks_to_sort.sort(
                    key=lambda x: (
                        x.bbox[region.direction_start_index]
                        // (region.text_line_width // 2),
                        x.bbox[region.secondary_direction_start_index],
                    )
                )
                blocks_to_sort = shrink_overlapping_boxes(
                    blocks_to_sort, region.secondary_direction
                )
                block_bboxes = np.array([block.bbox for block in blocks_to_sort])
                sorted_indexes = sort_by_xycut(
                    block_bboxes, direction=region.direction, min_gap=1
                )

            # Map sorted copies back to the original blocks via their indexes.
            sorted_blocks = [
                region.block_map[blocks_to_sort[i].index] for i in sorted_indexes
            ]
            sorted_blocks = match_unsorted_blocks(
                sorted_blocks,
                doc_title_blocks,
                region=region,
            )

        # "region"-labelled special blocks are matched within the current
        # pre-cut group rather than deferred to the final pass.
        if unsorted_blocks and unsorted_blocks[0].label == "region":
            sorted_blocks = match_unsorted_blocks(
                sorted_blocks,
                unsorted_blocks,
                region=region,
            )
            unsorted_blocks = []
        sorted_blocks_by_pre_cuts.extend(sorted_blocks)

    # Place any remaining special blocks into the combined sorted sequence.
    final_sorted_blocks = match_unsorted_blocks(
        sorted_blocks_by_pre_cuts,
        unsorted_blocks,
        region=region,
    )

    final_order_res_list.extend(final_sorted_blocks)
    final_order_res_list.extend(footer_blocks)
    final_order_res_list.extend(unordered_blocks)

    # Splice each block's children in directly after their parent.
    # insert_child_blocks may return an updated list, which is iterated
    # further by the enumerate above (original behavior preserved).
    for block_idx, block in enumerate(final_order_res_list):
        final_order_res_list = insert_child_blocks(
            block, block_idx, final_order_res_list
        )
    return final_order_res_list
@@ -45,9 +45,9 @@ class BEVDet3DPipeline(BasePipeline):
|
|
45
45
|
device (str): The device to run the prediction on. Default is None.
|
46
46
|
pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
|
47
47
|
use_hpip (bool, optional): Whether to use the high-performance
|
48
|
-
inference plugin (HPIP). Defaults to False.
|
48
|
+
inference plugin (HPIP) by default. Defaults to False.
|
49
49
|
hpi_config (Optional[Union[Dict[str, Any], HPIConfig]], optional):
|
50
|
-
The high-performance inference configuration dictionary.
|
50
|
+
The default high-performance inference configuration dictionary.
|
51
51
|
Defaults to None.
|
52
52
|
"""
|
53
53
|
super().__init__(
|
@@ -45,9 +45,9 @@ class MultilingualSpeechRecognitionPipeline(BasePipeline):
|
|
45
45
|
device (str): The device to run the prediction on. Default is None.
|
46
46
|
pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
|
47
47
|
use_hpip (bool, optional): Whether to use the high-performance
|
48
|
-
inference plugin (HPIP). Defaults to False.
|
48
|
+
inference plugin (HPIP) by default. Defaults to False.
|
49
49
|
hpi_config (Optional[Union[Dict[str, Any], HPIConfig]], optional):
|
50
|
-
The high-performance inference configuration dictionary.
|
50
|
+
The default high-performance inference configuration dictionary.
|
51
51
|
Defaults to None.
|
52
52
|
"""
|
53
53
|
super().__init__(
|
@@ -20,15 +20,13 @@ from ....utils.deps import pipeline_requires_extra
|
|
20
20
|
from ...models.object_detection.result import DetResult
|
21
21
|
from ...utils.hpi import HPIConfig
|
22
22
|
from ...utils.pp_option import PaddlePredictorOption
|
23
|
+
from .._parallel import AutoParallelImageSimpleInferencePipeline
|
23
24
|
from ..base import BasePipeline
|
24
25
|
|
25
26
|
|
26
|
-
|
27
|
-
class ObjectDetectionPipeline(BasePipeline):
|
27
|
+
class _ObjectDetectionPipeline(BasePipeline):
|
28
28
|
"""Object Detection Pipeline"""
|
29
29
|
|
30
|
-
entities = "object_detection"
|
31
|
-
|
32
30
|
def __init__(
|
33
31
|
self,
|
34
32
|
config: Dict,
|
@@ -45,9 +43,9 @@ class ObjectDetectionPipeline(BasePipeline):
|
|
45
43
|
device (str): The device to run the prediction on. Default is None.
|
46
44
|
pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
|
47
45
|
use_hpip (bool, optional): Whether to use the high-performance
|
48
|
-
inference plugin (HPIP). Defaults to False.
|
46
|
+
inference plugin (HPIP) by default. Defaults to False.
|
49
47
|
hpi_config (Optional[Union[Dict[str, Any], HPIConfig]], optional):
|
50
|
-
The high-performance inference configuration dictionary.
|
48
|
+
The default high-performance inference configuration dictionary.
|
51
49
|
Defaults to None.
|
52
50
|
"""
|
53
51
|
super().__init__(
|
@@ -103,3 +101,15 @@ class ObjectDetectionPipeline(BasePipeline):
|
|
103
101
|
layout_merge_bboxes_mode=layout_merge_bboxes_mode,
|
104
102
|
**kwargs,
|
105
103
|
)
|
104
|
+
|
105
|
+
|
106
|
+
@pipeline_requires_extra("cv")
class ObjectDetectionPipeline(AutoParallelImageSimpleInferencePipeline):
    """Public object detection pipeline.

    Thin wrapper exposing ``_ObjectDetectionPipeline`` through the
    auto-parallel simple-inference base class; requires the "cv" extra.
    """

    # Registry key under which this pipeline is looked up.
    entities = "object_detection"

    @property
    def _pipeline_cls(self):
        # Underlying single-instance pipeline implementation.
        return _ObjectDetectionPipeline

    def _get_batch_size(self, config):
        # Batch size from the ObjectDetection submodule config; defaults to 1.
        return config["SubModules"]["ObjectDetection"].get("batch_size", 1)
|