paddlex-3.0.0b2-py3-none-any.whl → paddlex-3.0.0rc0-py3-none-any.whl
This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- paddlex/.version +1 -1
- paddlex/__init__.py +1 -0
- paddlex/__main__.py +3 -4
- paddlex/configs/modules/3d_bev_detection/BEVFusion.yaml +38 -0
- paddlex/configs/modules/face_feature/MobileFaceNet.yaml +41 -0
- paddlex/configs/modules/face_feature/ResNet50_face.yaml +41 -0
- paddlex/configs/modules/formula_recognition/LaTeX_OCR_rec.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet-L.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet-S.yaml +40 -0
- paddlex/configs/modules/formula_recognition/UniMERNet.yaml +40 -0
- paddlex/configs/modules/image_classification/CLIP_vit_base_patch16_224.yaml +41 -0
- paddlex/configs/modules/image_classification/CLIP_vit_large_patch14_224.yaml +41 -0
- paddlex/configs/modules/image_classification/ConvNeXt_large_384.yaml +41 -0
- paddlex/configs/modules/keypoint_detection/PP-TinyPose_128x96.yaml +40 -0
- paddlex/configs/modules/keypoint_detection/PP-TinyPose_256x192.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocLayout-L.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocLayout-M.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocLayout-S.yaml +40 -0
- paddlex/configs/modules/multilingual_speech_recognition/whisper_base.yaml +12 -0
- paddlex/configs/modules/multilingual_speech_recognition/whisper_large.yaml +12 -0
- paddlex/configs/modules/multilingual_speech_recognition/whisper_medium.yaml +12 -0
- paddlex/configs/modules/multilingual_speech_recognition/whisper_small.yaml +12 -0
- paddlex/configs/modules/multilingual_speech_recognition/whisper_tiny.yaml +12 -0
- paddlex/configs/modules/object_detection/Co-DINO-R50.yaml +40 -0
- paddlex/configs/modules/object_detection/Co-DINO-Swin-L.yaml +40 -0
- paddlex/configs/modules/object_detection/Co-Deformable-DETR-R50.yaml +40 -0
- paddlex/configs/modules/object_detection/Co-Deformable-DETR-Swin-T.yaml +40 -0
- paddlex/configs/modules/object_detection/YOLOX-X.yaml +40 -0
- paddlex/configs/modules/open_vocabulary_detection/GroundingDINO-T.yaml +13 -0
- paddlex/configs/modules/open_vocabulary_segmentation/SAM-H_box.yaml +17 -0
- paddlex/configs/modules/open_vocabulary_segmentation/SAM-H_point.yaml +15 -0
- paddlex/configs/modules/rotated_object_detection/PP-YOLOE-R-L.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/MaskFormer_small.yaml +42 -0
- paddlex/configs/modules/semantic_segmentation/MaskFormer_tiny.yaml +42 -0
- paddlex/configs/modules/semantic_segmentation/SeaFormer_base.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SeaFormer_large.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SeaFormer_small.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SeaFormer_tiny.yaml +40 -0
- paddlex/configs/modules/table_cells_detection/RT-DETR-L_wired_table_cell_det.yaml +40 -0
- paddlex/configs/modules/table_cells_detection/RT-DETR-L_wireless_table_cell_det.yaml +40 -0
- paddlex/configs/modules/table_classification/PP-LCNet_x1_0_table_cls.yaml +41 -0
- paddlex/configs/modules/table_structure_recognition/SLANeXt_wired.yaml +39 -0
- paddlex/configs/modules/table_structure_recognition/SLANeXt_wireless.yaml +39 -0
- paddlex/configs/modules/text_detection/PP-OCRv3_mobile_det.yaml +40 -0
- paddlex/configs/modules/text_detection/PP-OCRv3_server_det.yaml +40 -0
- paddlex/configs/modules/text_recognition/PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/PP-OCRv4_server_rec_doc.yaml +39 -0
- paddlex/configs/modules/text_recognition/arabic_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/chinese_cht_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/cyrillic_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/devanagari_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/en_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/en_PP-OCRv4_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/japan_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/ka_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/korean_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/latin_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/ta_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/te_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/textline_orientation/PP-LCNet_x0_25_textline_ori.yaml +41 -0
- paddlex/configs/modules/video_classification/PP-TSM-R50_8frames_uniform.yaml +42 -0
- paddlex/configs/modules/video_classification/PP-TSMv2-LCNetV2_16frames_uniform.yaml +42 -0
- paddlex/configs/modules/video_classification/PP-TSMv2-LCNetV2_8frames_uniform.yaml +42 -0
- paddlex/configs/modules/video_detection/YOWO.yaml +40 -0
- paddlex/configs/pipelines/3d_bev_detection.yaml +9 -0
- paddlex/configs/pipelines/OCR.yaml +44 -0
- paddlex/configs/pipelines/PP-ChatOCRv3-doc.yaml +149 -0
- paddlex/configs/pipelines/PP-ChatOCRv4-doc.yaml +184 -0
- paddlex/configs/pipelines/PP-ShiTuV2.yaml +18 -0
- paddlex/configs/pipelines/PP-StructureV3.yaml +226 -0
- paddlex/configs/pipelines/anomaly_detection.yaml +8 -0
- paddlex/configs/pipelines/doc_preprocessor.yaml +15 -0
- paddlex/configs/pipelines/face_recognition.yaml +18 -0
- paddlex/configs/pipelines/formula_recognition.yaml +39 -0
- paddlex/configs/pipelines/human_keypoint_detection.yaml +17 -0
- paddlex/configs/pipelines/image_classification.yaml +10 -0
- paddlex/configs/pipelines/image_multilabel_classification.yaml +9 -0
- paddlex/configs/pipelines/instance_segmentation.yaml +10 -0
- paddlex/configs/pipelines/layout_parsing.yaml +101 -0
- paddlex/configs/pipelines/multilingual_speech_recognition.yaml +9 -0
- paddlex/configs/pipelines/object_detection.yaml +10 -0
- paddlex/configs/pipelines/open_vocabulary_detection.yaml +12 -0
- paddlex/configs/pipelines/open_vocabulary_segmentation.yaml +13 -0
- paddlex/configs/pipelines/pedestrian_attribute_recognition.yaml +15 -0
- paddlex/configs/pipelines/rotated_object_detection.yaml +10 -0
- paddlex/configs/pipelines/seal_recognition.yaml +51 -0
- paddlex/configs/pipelines/semantic_segmentation.yaml +10 -0
- paddlex/configs/pipelines/small_object_detection.yaml +10 -0
- paddlex/configs/pipelines/table_recognition.yaml +56 -0
- paddlex/configs/pipelines/table_recognition_v2.yaml +76 -0
- paddlex/configs/pipelines/ts_anomaly_detection.yaml +8 -0
- paddlex/configs/pipelines/ts_classification.yaml +8 -0
- paddlex/configs/pipelines/ts_forecast.yaml +8 -0
- paddlex/configs/pipelines/vehicle_attribute_recognition.yaml +15 -0
- paddlex/configs/pipelines/video_classification.yaml +9 -0
- paddlex/configs/pipelines/video_detection.yaml +10 -0
- paddlex/engine.py +1 -1
- paddlex/hpip_links.html +19 -0
- paddlex/inference/__init__.py +3 -1
- paddlex/inference/common/batch_sampler/__init__.py +20 -0
- paddlex/inference/common/batch_sampler/audio_batch_sampler.py +84 -0
- paddlex/inference/common/batch_sampler/base_batch_sampler.py +90 -0
- paddlex/inference/common/batch_sampler/det_3d_batch_sampler.py +147 -0
- paddlex/inference/common/batch_sampler/image_batch_sampler.py +136 -0
- paddlex/inference/common/batch_sampler/ts_batch_sampler.py +110 -0
- paddlex/inference/common/batch_sampler/video_batch_sampler.py +94 -0
- paddlex/inference/common/reader/__init__.py +19 -0
- paddlex/inference/common/reader/audio_reader.py +46 -0
- paddlex/inference/common/reader/det_3d_reader.py +239 -0
- paddlex/inference/common/reader/image_reader.py +69 -0
- paddlex/inference/common/reader/ts_reader.py +45 -0
- paddlex/inference/common/reader/video_reader.py +42 -0
- paddlex/inference/common/result/__init__.py +29 -0
- paddlex/inference/common/result/base_cv_result.py +31 -0
- paddlex/inference/common/result/base_result.py +70 -0
- paddlex/inference/common/result/base_ts_result.py +42 -0
- paddlex/inference/common/result/base_video_result.py +36 -0
- paddlex/inference/common/result/mixin.py +703 -0
- paddlex/inference/models/3d_bev_detection/__init__.py +15 -0
- paddlex/inference/models/3d_bev_detection/predictor.py +314 -0
- paddlex/inference/models/3d_bev_detection/processors.py +978 -0
- paddlex/inference/models/3d_bev_detection/result.py +65 -0
- paddlex/inference/models/3d_bev_detection/visualizer_3d.py +131 -0
- paddlex/inference/models/__init__.py +37 -13
- paddlex/inference/models/anomaly_detection/__init__.py +15 -0
- paddlex/inference/models/anomaly_detection/predictor.py +145 -0
- paddlex/inference/models/anomaly_detection/processors.py +46 -0
- paddlex/inference/models/anomaly_detection/result.py +70 -0
- paddlex/inference/models/base/__init__.py +1 -2
- paddlex/inference/models/base/predictor/__init__.py +16 -0
- paddlex/inference/models/base/predictor/base_predictor.py +175 -0
- paddlex/inference/models/base/predictor/basic_predictor.py +139 -0
- paddlex/inference/models/common/__init__.py +35 -0
- paddlex/inference/models/common/static_infer.py +329 -0
- paddlex/inference/models/common/tokenizer/__init__.py +17 -0
- paddlex/inference/models/common/tokenizer/bert_tokenizer.py +655 -0
- paddlex/inference/models/common/tokenizer/gpt_tokenizer.py +451 -0
- paddlex/inference/models/common/tokenizer/tokenizer_utils.py +2141 -0
- paddlex/inference/models/common/tokenizer/tokenizer_utils_base.py +3504 -0
- paddlex/inference/models/common/tokenizer/utils.py +66 -0
- paddlex/inference/models/common/tokenizer/vocab.py +647 -0
- paddlex/inference/models/common/ts/__init__.py +15 -0
- paddlex/inference/models/common/ts/funcs.py +533 -0
- paddlex/inference/models/common/ts/processors.py +313 -0
- paddlex/inference/models/common/vision/__init__.py +23 -0
- paddlex/inference/models/common/vision/funcs.py +93 -0
- paddlex/inference/models/common/vision/processors.py +270 -0
- paddlex/inference/models/face_feature/__init__.py +15 -0
- paddlex/inference/models/face_feature/predictor.py +65 -0
- paddlex/inference/models/formula_recognition/__init__.py +15 -0
- paddlex/inference/models/formula_recognition/predictor.py +203 -0
- paddlex/inference/models/formula_recognition/processors.py +986 -0
- paddlex/inference/models/formula_recognition/result.py +403 -0
- paddlex/inference/models/image_classification/__init__.py +15 -0
- paddlex/inference/models/image_classification/predictor.py +182 -0
- paddlex/inference/models/image_classification/processors.py +87 -0
- paddlex/inference/models/image_classification/result.py +92 -0
- paddlex/inference/models/image_feature/__init__.py +15 -0
- paddlex/inference/models/image_feature/predictor.py +156 -0
- paddlex/inference/models/image_feature/processors.py +29 -0
- paddlex/inference/models/image_feature/result.py +33 -0
- paddlex/inference/models/image_multilabel_classification/__init__.py +15 -0
- paddlex/inference/models/image_multilabel_classification/predictor.py +94 -0
- paddlex/inference/models/image_multilabel_classification/processors.py +85 -0
- paddlex/inference/models/image_multilabel_classification/result.py +95 -0
- paddlex/inference/models/image_unwarping/__init__.py +15 -0
- paddlex/inference/models/image_unwarping/predictor.py +105 -0
- paddlex/inference/models/image_unwarping/processors.py +88 -0
- paddlex/inference/models/image_unwarping/result.py +45 -0
- paddlex/inference/models/instance_segmentation/__init__.py +15 -0
- paddlex/inference/models/instance_segmentation/predictor.py +210 -0
- paddlex/inference/models/instance_segmentation/processors.py +105 -0
- paddlex/inference/models/instance_segmentation/result.py +161 -0
- paddlex/inference/models/keypoint_detection/__init__.py +15 -0
- paddlex/inference/models/keypoint_detection/predictor.py +188 -0
- paddlex/inference/models/keypoint_detection/processors.py +359 -0
- paddlex/inference/models/keypoint_detection/result.py +192 -0
- paddlex/inference/models/multilingual_speech_recognition/__init__.py +15 -0
- paddlex/inference/models/multilingual_speech_recognition/predictor.py +141 -0
- paddlex/inference/models/multilingual_speech_recognition/processors.py +1941 -0
- paddlex/inference/models/multilingual_speech_recognition/result.py +21 -0
- paddlex/inference/models/object_detection/__init__.py +15 -0
- paddlex/inference/models/object_detection/predictor.py +348 -0
- paddlex/inference/models/object_detection/processors.py +855 -0
- paddlex/inference/models/object_detection/result.py +113 -0
- paddlex/inference/models/object_detection/utils.py +68 -0
- paddlex/inference/models/open_vocabulary_detection/__init__.py +15 -0
- paddlex/inference/models/open_vocabulary_detection/predictor.py +155 -0
- paddlex/inference/models/open_vocabulary_detection/processors/__init__.py +15 -0
- paddlex/inference/models/open_vocabulary_detection/processors/groundingdino_processors.py +485 -0
- paddlex/inference/models/open_vocabulary_segmentation/__init__.py +15 -0
- paddlex/inference/models/open_vocabulary_segmentation/predictor.py +120 -0
- paddlex/inference/models/open_vocabulary_segmentation/processors/__init__.py +15 -0
- paddlex/inference/models/open_vocabulary_segmentation/processors/sam_processer.py +249 -0
- paddlex/inference/models/open_vocabulary_segmentation/results/__init__.py +15 -0
- paddlex/inference/models/open_vocabulary_segmentation/results/sam_result.py +147 -0
- paddlex/inference/models/semantic_segmentation/__init__.py +15 -0
- paddlex/inference/models/semantic_segmentation/predictor.py +167 -0
- paddlex/inference/models/semantic_segmentation/processors.py +114 -0
- paddlex/inference/models/semantic_segmentation/result.py +72 -0
- paddlex/inference/models/table_structure_recognition/__init__.py +15 -0
- paddlex/inference/models/table_structure_recognition/predictor.py +171 -0
- paddlex/inference/models/table_structure_recognition/processors.py +235 -0
- paddlex/inference/models/table_structure_recognition/result.py +70 -0
- paddlex/inference/models/text_detection/__init__.py +15 -0
- paddlex/inference/models/text_detection/predictor.py +191 -0
- paddlex/inference/models/text_detection/processors.py +466 -0
- paddlex/inference/models/text_detection/result.py +51 -0
- paddlex/inference/models/text_recognition/__init__.py +15 -0
- paddlex/inference/models/text_recognition/predictor.py +106 -0
- paddlex/inference/models/text_recognition/processors.py +231 -0
- paddlex/inference/models/text_recognition/result.py +75 -0
- paddlex/inference/models/ts_anomaly_detection/__init__.py +15 -0
- paddlex/inference/models/ts_anomaly_detection/predictor.py +146 -0
- paddlex/inference/models/ts_anomaly_detection/processors.py +94 -0
- paddlex/inference/models/ts_anomaly_detection/result.py +72 -0
- paddlex/inference/models/ts_classification/__init__.py +15 -0
- paddlex/inference/models/ts_classification/predictor.py +135 -0
- paddlex/inference/models/ts_classification/processors.py +117 -0
- paddlex/inference/models/ts_classification/result.py +78 -0
- paddlex/inference/models/ts_forecasting/__init__.py +15 -0
- paddlex/inference/models/ts_forecasting/predictor.py +159 -0
- paddlex/inference/models/ts_forecasting/processors.py +149 -0
- paddlex/inference/models/ts_forecasting/result.py +83 -0
- paddlex/inference/models/video_classification/__init__.py +15 -0
- paddlex/inference/models/video_classification/predictor.py +147 -0
- paddlex/inference/models/video_classification/processors.py +409 -0
- paddlex/inference/models/video_classification/result.py +92 -0
- paddlex/inference/models/video_detection/__init__.py +15 -0
- paddlex/inference/models/video_detection/predictor.py +136 -0
- paddlex/inference/models/video_detection/processors.py +450 -0
- paddlex/inference/models/video_detection/result.py +104 -0
- paddlex/inference/pipelines/3d_bev_detection/__init__.py +15 -0
- paddlex/inference/pipelines/3d_bev_detection/pipeline.py +67 -0
- paddlex/inference/pipelines/__init__.py +174 -73
- paddlex/inference/pipelines/anomaly_detection/__init__.py +15 -0
- paddlex/inference/pipelines/anomaly_detection/pipeline.py +62 -0
- paddlex/inference/pipelines/attribute_recognition/__init__.py +15 -0
- paddlex/inference/pipelines/attribute_recognition/pipeline.py +105 -0
- paddlex/inference/pipelines/attribute_recognition/result.py +100 -0
- paddlex/inference/pipelines/base.py +103 -57
- paddlex/inference/pipelines/components/__init__.py +23 -0
- paddlex/inference/pipelines/components/chat_server/__init__.py +16 -0
- paddlex/inference/pipelines/components/chat_server/base.py +39 -0
- paddlex/inference/pipelines/components/chat_server/openai_bot_chat.py +236 -0
- paddlex/inference/pipelines/components/common/__init__.py +18 -0
- paddlex/inference/pipelines/components/common/base_operator.py +36 -0
- paddlex/inference/pipelines/components/common/base_result.py +65 -0
- paddlex/inference/pipelines/components/common/convert_points_and_boxes.py +46 -0
- paddlex/inference/pipelines/components/common/crop_image_regions.py +550 -0
- paddlex/inference/pipelines/components/common/seal_det_warp.py +941 -0
- paddlex/inference/pipelines/components/common/sort_boxes.py +83 -0
- paddlex/inference/pipelines/components/faisser.py +352 -0
- paddlex/inference/pipelines/components/prompt_engineering/__init__.py +16 -0
- paddlex/inference/pipelines/components/prompt_engineering/base.py +35 -0
- paddlex/inference/pipelines/components/prompt_engineering/generate_ensemble_prompt.py +127 -0
- paddlex/inference/pipelines/components/prompt_engineering/generate_kie_prompt.py +148 -0
- paddlex/inference/pipelines/components/retriever/__init__.py +16 -0
- paddlex/inference/pipelines/components/retriever/base.py +226 -0
- paddlex/inference/pipelines/components/retriever/openai_bot_retriever.py +70 -0
- paddlex/inference/pipelines/components/retriever/qianfan_bot_retriever.py +163 -0
- paddlex/inference/pipelines/components/utils/__init__.py +13 -0
- paddlex/inference/pipelines/components/utils/mixin.py +206 -0
- paddlex/inference/pipelines/doc_preprocessor/__init__.py +15 -0
- paddlex/inference/pipelines/doc_preprocessor/pipeline.py +190 -0
- paddlex/inference/pipelines/doc_preprocessor/result.py +103 -0
- paddlex/inference/pipelines/face_recognition/__init__.py +15 -0
- paddlex/inference/pipelines/face_recognition/pipeline.py +61 -0
- paddlex/inference/pipelines/face_recognition/result.py +43 -0
- paddlex/inference/pipelines/formula_recognition/__init__.py +15 -0
- paddlex/inference/pipelines/formula_recognition/pipeline.py +303 -0
- paddlex/inference/pipelines/formula_recognition/result.py +291 -0
- paddlex/inference/pipelines/image_classification/__init__.py +15 -0
- paddlex/inference/pipelines/image_classification/pipeline.py +71 -0
- paddlex/inference/pipelines/image_multilabel_classification/__init__.py +15 -0
- paddlex/inference/pipelines/image_multilabel_classification/pipeline.py +78 -0
- paddlex/inference/pipelines/instance_segmentation/__init__.py +15 -0
- paddlex/inference/pipelines/instance_segmentation/pipeline.py +70 -0
- paddlex/inference/pipelines/keypoint_detection/__init__.py +15 -0
- paddlex/inference/pipelines/keypoint_detection/pipeline.py +137 -0
- paddlex/inference/pipelines/layout_parsing/__init__.py +2 -1
- paddlex/inference/pipelines/layout_parsing/pipeline.py +570 -0
- paddlex/inference/pipelines/layout_parsing/pipeline_v2.py +739 -0
- paddlex/inference/pipelines/layout_parsing/result.py +203 -0
- paddlex/inference/pipelines/layout_parsing/result_v2.py +470 -0
- paddlex/inference/pipelines/layout_parsing/utils.py +2385 -0
- paddlex/inference/pipelines/multilingual_speech_recognition/__init__.py +15 -0
- paddlex/inference/pipelines/multilingual_speech_recognition/pipeline.py +67 -0
- paddlex/inference/pipelines/object_detection/__init__.py +15 -0
- paddlex/inference/pipelines/object_detection/pipeline.py +95 -0
- paddlex/inference/pipelines/ocr/__init__.py +15 -0
- paddlex/inference/pipelines/ocr/pipeline.py +389 -0
- paddlex/inference/pipelines/ocr/result.py +248 -0
- paddlex/inference/pipelines/open_vocabulary_detection/__init__.py +15 -0
- paddlex/inference/pipelines/open_vocabulary_detection/pipeline.py +75 -0
- paddlex/inference/pipelines/open_vocabulary_segmentation/__init__.py +15 -0
- paddlex/inference/pipelines/open_vocabulary_segmentation/pipeline.py +89 -0
- paddlex/inference/pipelines/pp_chatocr/__init__.py +16 -0
- paddlex/inference/pipelines/pp_chatocr/pipeline_base.py +102 -0
- paddlex/inference/pipelines/pp_chatocr/pipeline_v3.py +773 -0
- paddlex/inference/pipelines/pp_chatocr/pipeline_v4.py +977 -0
- paddlex/inference/pipelines/pp_shitu_v2/__init__.py +15 -0
- paddlex/inference/pipelines/pp_shitu_v2/pipeline.py +152 -0
- paddlex/inference/pipelines/pp_shitu_v2/result.py +126 -0
- paddlex/inference/pipelines/rotated_object_detection/__init__.py +15 -0
- paddlex/inference/pipelines/rotated_object_detection/pipeline.py +74 -0
- paddlex/inference/pipelines/seal_recognition/__init__.py +15 -0
- paddlex/inference/pipelines/seal_recognition/pipeline.py +271 -0
- paddlex/inference/pipelines/seal_recognition/result.py +87 -0
- paddlex/inference/pipelines/semantic_segmentation/__init__.py +15 -0
- paddlex/inference/pipelines/semantic_segmentation/pipeline.py +74 -0
- paddlex/inference/pipelines/small_object_detection/__init__.py +15 -0
- paddlex/inference/pipelines/small_object_detection/pipeline.py +74 -0
- paddlex/inference/pipelines/table_recognition/__init__.py +2 -1
- paddlex/inference/pipelines/table_recognition/pipeline.py +462 -0
- paddlex/inference/pipelines/table_recognition/pipeline_v2.py +792 -0
- paddlex/inference/pipelines/table_recognition/result.py +216 -0
- paddlex/inference/pipelines/table_recognition/table_recognition_post_processing.py +362 -0
- paddlex/inference/pipelines/table_recognition/table_recognition_post_processing_v2.py +470 -0
- paddlex/inference/pipelines/table_recognition/utils.py +23 -436
- paddlex/inference/pipelines/ts_anomaly_detection/__init__.py +15 -0
- paddlex/inference/pipelines/ts_anomaly_detection/pipeline.py +62 -0
- paddlex/inference/pipelines/ts_classification/__init__.py +15 -0
- paddlex/inference/pipelines/ts_classification/pipeline.py +62 -0
- paddlex/inference/pipelines/ts_forecasting/__init__.py +15 -0
- paddlex/inference/pipelines/ts_forecasting/pipeline.py +62 -0
- paddlex/inference/pipelines/video_classification/__init__.py +15 -0
- paddlex/inference/pipelines/video_classification/pipeline.py +68 -0
- paddlex/inference/pipelines/video_detection/__init__.py +15 -0
- paddlex/inference/pipelines/video_detection/pipeline.py +73 -0
- paddlex/inference/serving/__init__.py +13 -0
- paddlex/inference/serving/basic_serving/__init__.py +18 -0
- paddlex/inference/serving/basic_serving/_app.py +209 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/__init__.py +41 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/_common/__init__.py +13 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/_common/common.py +96 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/_common/image_recognition.py +36 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/_common/ocr.py +90 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/anomaly_detection.py +64 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/doc_preprocessor.py +97 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/face_recognition.py +223 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/formula_recognition.py +97 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/human_keypoint_detection.py +78 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/image_classification.py +66 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/image_multilabel_classification.py +70 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/instance_segmentation.py +81 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/layout_parsing.py +115 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/m_3d_bev_detection.py +76 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/multilingual_speech_recognition.py +89 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/object_detection.py +74 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/ocr.py +99 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/open_vocabulary_detection.py +78 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/open_vocabulary_segmentation.py +85 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/pedestrian_attribute_recognition.py +81 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv3_doc.py +191 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv4_doc.py +221 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_shituv2.py +218 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_structurev3.py +136 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/rotated_object_detection.py +78 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/seal_recognition.py +103 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/semantic_segmentation.py +64 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/small_object_detection.py +69 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition.py +105 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition_v2.py +107 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/ts_anomaly_detection.py +62 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/ts_classification.py +61 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/ts_forecast.py +62 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/vehicle_attribute_recognition.py +81 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/video_classification.py +73 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/video_detection.py +89 -0
- paddlex/inference/serving/basic_serving/_server.py +35 -0
- paddlex/inference/serving/infra/__init__.py +13 -0
- paddlex/inference/serving/infra/config.py +36 -0
- paddlex/inference/serving/infra/models.py +72 -0
- paddlex/inference/serving/infra/storage.py +175 -0
- paddlex/inference/serving/infra/utils.py +259 -0
- paddlex/inference/serving/schemas/__init__.py +13 -0
- paddlex/inference/serving/schemas/anomaly_detection.py +39 -0
- paddlex/inference/serving/schemas/doc_preprocessor.py +54 -0
- paddlex/inference/serving/schemas/face_recognition.py +124 -0
- paddlex/inference/serving/schemas/formula_recognition.py +56 -0
- paddlex/inference/serving/schemas/human_keypoint_detection.py +55 -0
- paddlex/inference/serving/schemas/image_classification.py +45 -0
- paddlex/inference/serving/schemas/image_multilabel_classification.py +47 -0
- paddlex/inference/serving/schemas/instance_segmentation.py +53 -0
- paddlex/inference/serving/schemas/layout_parsing.py +72 -0
- paddlex/inference/serving/schemas/m_3d_bev_detection.py +48 -0
- paddlex/inference/serving/schemas/multilingual_speech_recognition.py +57 -0
- paddlex/inference/serving/schemas/object_detection.py +52 -0
- paddlex/inference/serving/schemas/ocr.py +60 -0
- paddlex/inference/serving/schemas/open_vocabulary_detection.py +52 -0
- paddlex/inference/serving/schemas/open_vocabulary_segmentation.py +52 -0
- paddlex/inference/serving/schemas/pedestrian_attribute_recognition.py +61 -0
- paddlex/inference/serving/schemas/pp_chatocrv3_doc.py +134 -0
- paddlex/inference/serving/schemas/pp_chatocrv4_doc.py +151 -0
- paddlex/inference/serving/schemas/pp_shituv2.py +124 -0
- paddlex/inference/serving/schemas/pp_structurev3.py +84 -0
- paddlex/inference/serving/schemas/rotated_object_detection.py +52 -0
- paddlex/inference/serving/schemas/seal_recognition.py +62 -0
- paddlex/inference/serving/schemas/semantic_segmentation.py +45 -0
- paddlex/inference/serving/schemas/shared/__init__.py +13 -0
- paddlex/inference/serving/schemas/shared/classification.py +23 -0
- paddlex/inference/serving/schemas/shared/image_segmentation.py +28 -0
- paddlex/inference/serving/schemas/shared/object_detection.py +24 -0
- paddlex/inference/serving/schemas/shared/ocr.py +25 -0
- paddlex/inference/serving/schemas/small_object_detection.py +52 -0
- paddlex/inference/serving/schemas/table_recognition.py +64 -0
- paddlex/inference/serving/schemas/table_recognition_v2.py +66 -0
- paddlex/inference/serving/schemas/ts_anomaly_detection.py +37 -0
- paddlex/inference/serving/schemas/ts_classification.py +38 -0
- paddlex/inference/serving/schemas/ts_forecast.py +37 -0
- paddlex/inference/serving/schemas/vehicle_attribute_recognition.py +61 -0
- paddlex/inference/serving/schemas/video_classification.py +44 -0
- paddlex/inference/serving/schemas/video_detection.py +56 -0
- paddlex/inference/utils/benchmark.py +23 -11
- paddlex/inference/utils/get_pipeline_path.py +2 -1
- paddlex/inference/utils/io/__init__.py +3 -0
- paddlex/inference/utils/io/readers.py +164 -17
- paddlex/inference/utils/io/writers.py +85 -2
- paddlex/inference/utils/new_ir_blacklist.py +6 -0
- paddlex/inference/utils/official_models.py +277 -211
- paddlex/inference/utils/pp_option.py +24 -4
- paddlex/model.py +12 -5
- paddlex/modules/3d_bev_detection/__init__.py +18 -0
- paddlex/modules/3d_bev_detection/dataset_checker/__init__.py +95 -0
- paddlex/modules/3d_bev_detection/dataset_checker/dataset_src/__init__.py +17 -0
- paddlex/modules/3d_bev_detection/dataset_checker/dataset_src/analyse_dataset.py +106 -0
- paddlex/modules/3d_bev_detection/dataset_checker/dataset_src/check_dataset.py +102 -0
- paddlex/modules/3d_bev_detection/evaluator.py +46 -0
- paddlex/modules/3d_bev_detection/exportor.py +22 -0
- paddlex/modules/3d_bev_detection/model_list.py +18 -0
- paddlex/modules/3d_bev_detection/trainer.py +70 -0
- paddlex/modules/__init__.py +34 -1
- paddlex/modules/base/build_model.py +1 -1
- paddlex/modules/base/dataset_checker/dataset_checker.py +6 -1
- paddlex/modules/base/evaluator.py +20 -4
- paddlex/modules/base/exportor.py +30 -5
- paddlex/modules/base/trainer.py +29 -6
- paddlex/modules/face_recognition/trainer.py +1 -23
- paddlex/modules/formula_recognition/__init__.py +5 -0
- paddlex/modules/formula_recognition/dataset_checker/__init__.py +113 -0
- paddlex/modules/formula_recognition/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/formula_recognition/dataset_checker/dataset_src/analyse_dataset.py +157 -0
- paddlex/modules/formula_recognition/dataset_checker/dataset_src/check_dataset.py +80 -0
- paddlex/modules/formula_recognition/dataset_checker/dataset_src/convert_dataset.py +94 -0
- paddlex/modules/formula_recognition/dataset_checker/dataset_src/split_dataset.py +81 -0
- paddlex/modules/formula_recognition/evaluator.py +77 -0
- paddlex/modules/formula_recognition/exportor.py +22 -0
- paddlex/modules/formula_recognition/model_list.py +3 -0
- paddlex/modules/formula_recognition/trainer.py +121 -0
- paddlex/modules/image_classification/model_list.py +2 -0
- paddlex/modules/instance_segmentation/dataset_checker/__init__.py +15 -0
- paddlex/modules/keypoint_detection/__init__.py +18 -0
- paddlex/modules/keypoint_detection/dataset_checker/__init__.py +56 -0
- paddlex/modules/keypoint_detection/dataset_checker/dataset_src/__init__.py +15 -0
- paddlex/modules/keypoint_detection/dataset_checker/dataset_src/check_dataset.py +86 -0
- paddlex/modules/keypoint_detection/dataset_checker/dataset_src/utils/__init__.py +13 -0
- paddlex/modules/keypoint_detection/dataset_checker/dataset_src/utils/visualizer.py +119 -0
- paddlex/modules/keypoint_detection/evaluator.py +41 -0
- paddlex/modules/keypoint_detection/exportor.py +22 -0
- paddlex/modules/keypoint_detection/model_list.py +16 -0
- paddlex/modules/keypoint_detection/trainer.py +39 -0
- paddlex/modules/multilingual_speech_recognition/__init__.py +18 -0
- paddlex/modules/multilingual_speech_recognition/dataset_checker.py +27 -0
- paddlex/modules/multilingual_speech_recognition/evaluator.py +27 -0
- paddlex/modules/multilingual_speech_recognition/exportor.py +27 -0
- paddlex/modules/multilingual_speech_recognition/model_list.py +22 -0
- paddlex/modules/multilingual_speech_recognition/trainer.py +40 -0
- paddlex/modules/object_detection/evaluator.py +12 -1
- paddlex/modules/object_detection/model_list.py +10 -0
- paddlex/modules/object_detection/trainer.py +15 -1
- paddlex/modules/open_vocabulary_detection/__init__.py +18 -0
- paddlex/modules/open_vocabulary_detection/dataset_checker.py +29 -0
- paddlex/modules/open_vocabulary_detection/evaluator.py +29 -0
- paddlex/modules/open_vocabulary_detection/exportor.py +29 -0
- paddlex/modules/open_vocabulary_detection/model_list.py +18 -0
- paddlex/modules/open_vocabulary_detection/trainer.py +42 -0
- paddlex/modules/open_vocabulary_segmentation/__init__.py +18 -0
- paddlex/modules/open_vocabulary_segmentation/dataset_checker.py +29 -0
- paddlex/modules/open_vocabulary_segmentation/evaluator.py +29 -0
- paddlex/modules/open_vocabulary_segmentation/exportor.py +29 -0
- paddlex/modules/open_vocabulary_segmentation/model_list.py +19 -0
- paddlex/modules/open_vocabulary_segmentation/trainer.py +42 -0
- paddlex/modules/semantic_segmentation/dataset_checker/__init__.py +15 -0
- paddlex/modules/semantic_segmentation/exportor.py +9 -0
- paddlex/modules/semantic_segmentation/model_list.py +2 -0
- paddlex/modules/semantic_segmentation/trainer.py +2 -0
- paddlex/modules/table_recognition/dataset_checker/__init__.py +16 -1
- paddlex/modules/table_recognition/dataset_checker/dataset_src/check_dataset.py +13 -14
- paddlex/modules/table_recognition/model_list.py +2 -0
- paddlex/modules/text_detection/dataset_checker/__init__.py +16 -1
- paddlex/modules/text_detection/dataset_checker/dataset_src/check_dataset.py +13 -3
- paddlex/modules/text_detection/model_list.py +2 -0
- paddlex/modules/text_recognition/dataset_checker/__init__.py +16 -4
- paddlex/modules/text_recognition/dataset_checker/dataset_src/check_dataset.py +13 -3
- paddlex/modules/text_recognition/evaluator.py +4 -3
- paddlex/modules/text_recognition/exportor.py +0 -3
- paddlex/modules/text_recognition/model_list.py +14 -0
- paddlex/modules/text_recognition/trainer.py +4 -3
- paddlex/modules/ts_anomaly_detection/dataset_checker/__init__.py +15 -0
- paddlex/modules/ts_anomaly_detection/trainer.py +17 -1
- paddlex/modules/ts_classification/dataset_checker/__init__.py +15 -0
- paddlex/modules/ts_classification/trainer.py +17 -1
- paddlex/modules/ts_forecast/dataset_checker/__init__.py +15 -0
- paddlex/modules/ts_forecast/trainer.py +17 -1
- paddlex/modules/video_classification/__init__.py +18 -0
- paddlex/modules/video_classification/dataset_checker/__init__.py +93 -0
- paddlex/modules/video_classification/dataset_checker/dataset_src/__init__.py +18 -0
- paddlex/modules/video_classification/dataset_checker/dataset_src/analyse_dataset.py +93 -0
- paddlex/modules/video_classification/dataset_checker/dataset_src/check_dataset.py +121 -0
- paddlex/modules/video_classification/dataset_checker/dataset_src/split_dataset.py +82 -0
- paddlex/modules/video_classification/evaluator.py +44 -0
- paddlex/modules/video_classification/exportor.py +22 -0
- paddlex/modules/video_classification/model_list.py +19 -0
- paddlex/modules/video_classification/trainer.py +88 -0
- paddlex/modules/video_detection/__init__.py +18 -0
- paddlex/modules/video_detection/dataset_checker/__init__.py +86 -0
- paddlex/modules/video_detection/dataset_checker/dataset_src/__init__.py +17 -0
- paddlex/modules/video_detection/dataset_checker/dataset_src/analyse_dataset.py +101 -0
- paddlex/modules/video_detection/dataset_checker/dataset_src/check_dataset.py +134 -0
- paddlex/modules/video_detection/evaluator.py +42 -0
- paddlex/modules/video_detection/exportor.py +22 -0
- paddlex/modules/video_detection/model_list.py +15 -0
- paddlex/modules/video_detection/trainer.py +82 -0
- paddlex/ops/__init__.py +149 -0
- paddlex/ops/iou3d_nms/iou3d_cpu.cpp +264 -0
- paddlex/ops/iou3d_nms/iou3d_cpu.h +27 -0
- paddlex/ops/iou3d_nms/iou3d_nms.cpp +204 -0
- paddlex/ops/iou3d_nms/iou3d_nms.h +33 -0
- paddlex/ops/iou3d_nms/iou3d_nms_api.cpp +108 -0
- paddlex/ops/iou3d_nms/iou3d_nms_kernel.cu +482 -0
- paddlex/ops/setup.py +37 -0
- paddlex/ops/voxel/voxelize_op.cc +191 -0
- paddlex/ops/voxel/voxelize_op.cu +346 -0
- paddlex/paddle2onnx_requirements.txt +1 -0
- paddlex/paddlex_cli.py +339 -72
- paddlex/repo_apis/Paddle3D_api/__init__.py +17 -0
- paddlex/repo_apis/Paddle3D_api/bev_fusion/__init__.py +18 -0
- paddlex/repo_apis/Paddle3D_api/bev_fusion/config.py +118 -0
- paddlex/repo_apis/Paddle3D_api/bev_fusion/model.py +238 -0
- paddlex/repo_apis/Paddle3D_api/bev_fusion/register.py +55 -0
- paddlex/repo_apis/Paddle3D_api/bev_fusion/runner.py +104 -0
- paddlex/repo_apis/Paddle3D_api/pp3d_config.py +144 -0
- paddlex/repo_apis/PaddleClas_api/cls/model.py +6 -0
- paddlex/repo_apis/PaddleClas_api/cls/register.py +20 -2
- paddlex/repo_apis/PaddleDetection_api/instance_seg/config.py +8 -4
- paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py +6 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/config.py +27 -5
- paddlex/repo_apis/PaddleDetection_api/object_det/model.py +6 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/official_categories.py +81 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/register.py +182 -3
- paddlex/repo_apis/PaddleOCR_api/__init__.py +1 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/__init__.py +16 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/config.py +570 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/model.py +402 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/register.py +73 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/runner.py +240 -0
- paddlex/repo_apis/PaddleOCR_api/table_rec/register.py +18 -0
- paddlex/repo_apis/PaddleOCR_api/text_det/register.py +18 -0
- paddlex/repo_apis/PaddleOCR_api/text_rec/config.py +21 -0
- paddlex/repo_apis/PaddleOCR_api/text_rec/model.py +6 -0
- paddlex/repo_apis/PaddleOCR_api/text_rec/register.py +126 -7
- paddlex/repo_apis/PaddleSeg_api/seg/config.py +9 -0
- paddlex/repo_apis/PaddleSeg_api/seg/model.py +10 -0
- paddlex/repo_apis/PaddleSeg_api/seg/register.py +20 -0
- paddlex/repo_apis/PaddleTS_api/ts_base/config.py +24 -0
- paddlex/repo_apis/PaddleTS_api/ts_base/model.py +11 -7
- paddlex/repo_apis/PaddleVideo_api/__init__.py +17 -0
- paddlex/repo_apis/PaddleVideo_api/config_utils.py +51 -0
- paddlex/repo_apis/PaddleVideo_api/video_cls/__init__.py +19 -0
- paddlex/repo_apis/PaddleVideo_api/video_cls/config.py +547 -0
- paddlex/repo_apis/PaddleVideo_api/video_cls/model.py +346 -0
- paddlex/repo_apis/PaddleVideo_api/video_cls/register.py +71 -0
- paddlex/repo_apis/PaddleVideo_api/video_cls/runner.py +205 -0
- paddlex/repo_apis/PaddleVideo_api/video_det/__init__.py +19 -0
- paddlex/repo_apis/PaddleVideo_api/video_det/config.py +548 -0
- paddlex/repo_apis/PaddleVideo_api/video_det/model.py +298 -0
- paddlex/repo_apis/PaddleVideo_api/video_det/register.py +45 -0
- paddlex/repo_apis/PaddleVideo_api/video_det/runner.py +200 -0
- paddlex/repo_apis/base/runner.py +2 -1
- paddlex/repo_manager/meta.py +29 -2
- paddlex/repo_manager/repo.py +24 -5
- paddlex/repo_manager/requirements.txt +10 -7
- paddlex/repo_manager/utils.py +62 -1
- paddlex/serving_requirements.txt +9 -0
- paddlex/utils/config.py +4 -3
- paddlex/utils/custom_device_whitelist.py +457 -0
- paddlex/utils/device.py +74 -26
- paddlex/utils/env.py +28 -0
- paddlex/utils/flags.py +4 -0
- paddlex/utils/fonts/__init__.py +48 -5
- paddlex/utils/lazy_loader.py +2 -0
- paddlex/utils/logging.py +1 -2
- paddlex/utils/pipeline_arguments.py +711 -0
- paddlex-3.0.0rc0.dist-info/METADATA +1035 -0
- paddlex-3.0.0rc0.dist-info/RECORD +1015 -0
- paddlex-3.0.0rc0.dist-info/WHEEL +5 -0
- paddlex/configs/face_recognition/MobileFaceNet.yaml +0 -44
- paddlex/configs/face_recognition/ResNet50_face.yaml +0 -44
- paddlex/configs/formula_recognition/LaTeX_OCR_rec.yaml +0 -40
- paddlex/configs/image_classification/CLIP_vit_base_patch16_224.yaml +0 -41
- paddlex/configs/image_classification/CLIP_vit_large_patch14_224.yaml +0 -41
- paddlex/configs/image_classification/ConvNeXt_large_384.yaml +0 -41
- paddlex/configs/object_detection/YOLOX-X.yaml +0 -40
- paddlex/configs/semantic_segmentation/SeaFormer_base.yaml +0 -40
- paddlex/configs/semantic_segmentation/SeaFormer_large.yaml +0 -40
- paddlex/configs/semantic_segmentation/SeaFormer_small.yaml +0 -40
- paddlex/configs/semantic_segmentation/SeaFormer_tiny.yaml +0 -40
- paddlex/inference/components/__init__.py +0 -18
- paddlex/inference/components/base.py +0 -292
- paddlex/inference/components/llm/__init__.py +0 -25
- paddlex/inference/components/llm/base.py +0 -65
- paddlex/inference/components/llm/erniebot.py +0 -212
- paddlex/inference/components/paddle_predictor/__init__.py +0 -20
- paddlex/inference/components/paddle_predictor/predictor.py +0 -332
- paddlex/inference/components/retrieval/__init__.py +0 -15
- paddlex/inference/components/retrieval/faiss.py +0 -359
- paddlex/inference/components/task_related/__init__.py +0 -33
- paddlex/inference/components/task_related/clas.py +0 -124
- paddlex/inference/components/task_related/det.py +0 -284
- paddlex/inference/components/task_related/instance_seg.py +0 -89
- paddlex/inference/components/task_related/seal_det_warp.py +0 -940
- paddlex/inference/components/task_related/seg.py +0 -40
- paddlex/inference/components/task_related/table_rec.py +0 -191
- paddlex/inference/components/task_related/text_det.py +0 -895
- paddlex/inference/components/task_related/text_rec.py +0 -353
- paddlex/inference/components/task_related/warp.py +0 -43
- paddlex/inference/components/transforms/__init__.py +0 -16
- paddlex/inference/components/transforms/image/__init__.py +0 -15
- paddlex/inference/components/transforms/image/common.py +0 -598
- paddlex/inference/components/transforms/image/funcs.py +0 -58
- paddlex/inference/components/transforms/read_data.py +0 -67
- paddlex/inference/components/transforms/ts/__init__.py +0 -15
- paddlex/inference/components/transforms/ts/common.py +0 -393
- paddlex/inference/components/transforms/ts/funcs.py +0 -424
- paddlex/inference/models/anomaly_detection.py +0 -87
- paddlex/inference/models/base/base_predictor.py +0 -76
- paddlex/inference/models/base/basic_predictor.py +0 -122
- paddlex/inference/models/face_recognition.py +0 -21
- paddlex/inference/models/formula_recognition.py +0 -55
- paddlex/inference/models/general_recognition.py +0 -99
- paddlex/inference/models/image_classification.py +0 -101
- paddlex/inference/models/image_unwarping.py +0 -43
- paddlex/inference/models/instance_segmentation.py +0 -66
- paddlex/inference/models/multilabel_classification.py +0 -33
- paddlex/inference/models/object_detection.py +0 -129
- paddlex/inference/models/semantic_segmentation.py +0 -86
- paddlex/inference/models/table_recognition.py +0 -106
- paddlex/inference/models/text_detection.py +0 -105
- paddlex/inference/models/text_recognition.py +0 -78
- paddlex/inference/models/ts_ad.py +0 -68
- paddlex/inference/models/ts_cls.py +0 -57
- paddlex/inference/models/ts_fc.py +0 -73
- paddlex/inference/pipelines/attribute_recognition.py +0 -92
- paddlex/inference/pipelines/face_recognition.py +0 -49
- paddlex/inference/pipelines/formula_recognition.py +0 -102
- paddlex/inference/pipelines/layout_parsing/layout_parsing.py +0 -362
- paddlex/inference/pipelines/ocr.py +0 -80
- paddlex/inference/pipelines/pp_shitu_v2.py +0 -152
- paddlex/inference/pipelines/ppchatocrv3/__init__.py +0 -15
- paddlex/inference/pipelines/ppchatocrv3/ch_prompt.yaml +0 -14
- paddlex/inference/pipelines/ppchatocrv3/ppchatocrv3.py +0 -717
- paddlex/inference/pipelines/ppchatocrv3/utils.py +0 -168
- paddlex/inference/pipelines/seal_recognition.py +0 -152
- paddlex/inference/pipelines/serving/__init__.py +0 -17
- paddlex/inference/pipelines/serving/_pipeline_apps/__init__.py +0 -205
- paddlex/inference/pipelines/serving/_pipeline_apps/anomaly_detection.py +0 -80
- paddlex/inference/pipelines/serving/_pipeline_apps/face_recognition.py +0 -317
- paddlex/inference/pipelines/serving/_pipeline_apps/formula_recognition.py +0 -119
- paddlex/inference/pipelines/serving/_pipeline_apps/image_classification.py +0 -101
- paddlex/inference/pipelines/serving/_pipeline_apps/instance_segmentation.py +0 -112
- paddlex/inference/pipelines/serving/_pipeline_apps/layout_parsing.py +0 -205
- paddlex/inference/pipelines/serving/_pipeline_apps/multi_label_image_classification.py +0 -90
- paddlex/inference/pipelines/serving/_pipeline_apps/object_detection.py +0 -90
- paddlex/inference/pipelines/serving/_pipeline_apps/ocr.py +0 -98
- paddlex/inference/pipelines/serving/_pipeline_apps/pedestrian_attribute_recognition.py +0 -102
- paddlex/inference/pipelines/serving/_pipeline_apps/pp_shitu_v2.py +0 -319
- paddlex/inference/pipelines/serving/_pipeline_apps/ppchatocrv3.py +0 -445
- paddlex/inference/pipelines/serving/_pipeline_apps/seal_recognition.py +0 -110
- paddlex/inference/pipelines/serving/_pipeline_apps/semantic_segmentation.py +0 -82
- paddlex/inference/pipelines/serving/_pipeline_apps/small_object_detection.py +0 -92
- paddlex/inference/pipelines/serving/_pipeline_apps/table_recognition.py +0 -110
- paddlex/inference/pipelines/serving/_pipeline_apps/ts_ad.py +0 -68
- paddlex/inference/pipelines/serving/_pipeline_apps/ts_cls.py +0 -68
- paddlex/inference/pipelines/serving/_pipeline_apps/ts_fc.py +0 -68
- paddlex/inference/pipelines/serving/_pipeline_apps/vehicle_attribute_recognition.py +0 -102
- paddlex/inference/pipelines/serving/app.py +0 -164
- paddlex/inference/pipelines/serving/models.py +0 -30
- paddlex/inference/pipelines/serving/server.py +0 -25
- paddlex/inference/pipelines/serving/storage.py +0 -161
- paddlex/inference/pipelines/serving/utils.py +0 -190
- paddlex/inference/pipelines/single_model_pipeline.py +0 -76
- paddlex/inference/pipelines/table_recognition/table_recognition.py +0 -193
- paddlex/inference/results/__init__.py +0 -31
- paddlex/inference/results/attribute_rec.py +0 -89
- paddlex/inference/results/base.py +0 -43
- paddlex/inference/results/chat_ocr.py +0 -158
- paddlex/inference/results/clas.py +0 -133
- paddlex/inference/results/det.py +0 -86
- paddlex/inference/results/face_rec.py +0 -34
- paddlex/inference/results/formula_rec.py +0 -363
- paddlex/inference/results/instance_seg.py +0 -152
- paddlex/inference/results/ocr.py +0 -157
- paddlex/inference/results/seal_rec.py +0 -50
- paddlex/inference/results/seg.py +0 -72
- paddlex/inference/results/shitu.py +0 -35
- paddlex/inference/results/table_rec.py +0 -109
- paddlex/inference/results/text_det.py +0 -33
- paddlex/inference/results/text_rec.py +0 -66
- paddlex/inference/results/ts.py +0 -37
- paddlex/inference/results/utils/mixin.py +0 -204
- paddlex/inference/results/warp.py +0 -31
- paddlex/inference/utils/process_hook.py +0 -54
- paddlex/pipelines/OCR.yaml +0 -8
- paddlex/pipelines/PP-ChatOCRv3-doc.yaml +0 -27
- paddlex/pipelines/PP-ShiTuV2.yaml +0 -13
- paddlex/pipelines/anomaly_detection.yaml +0 -7
- paddlex/pipelines/face_recognition.yaml +0 -13
- paddlex/pipelines/formula_recognition.yaml +0 -8
- paddlex/pipelines/image_classification.yaml +0 -7
- paddlex/pipelines/instance_segmentation.yaml +0 -7
- paddlex/pipelines/layout_parsing.yaml +0 -14
- paddlex/pipelines/multi_label_image_classification.yaml +0 -7
- paddlex/pipelines/object_detection.yaml +0 -7
- paddlex/pipelines/pedestrian_attribute_recognition.yaml +0 -7
- paddlex/pipelines/seal_recognition.yaml +0 -10
- paddlex/pipelines/semantic_segmentation.yaml +0 -7
- paddlex/pipelines/small_object_detection.yaml +0 -7
- paddlex/pipelines/table_recognition.yaml +0 -12
- paddlex/pipelines/ts_ad.yaml +0 -7
- paddlex/pipelines/ts_cls.yaml +0 -7
- paddlex/pipelines/ts_fc.yaml +0 -7
- paddlex/pipelines/vehicle_attribute_recognition.yaml +0 -7
- paddlex/utils/fonts/PingFang-SC-Regular.ttf +0 -0
- paddlex-3.0.0b2.dist-info/METADATA +0 -760
- paddlex-3.0.0b2.dist-info/RECORD +0 -646
- paddlex-3.0.0b2.dist-info/WHEEL +0 -5
- /paddlex/configs/{doc_text_orientation → modules/doc_text_orientation}/PP-LCNet_x1_0_doc_ori.yaml +0 -0
- /paddlex/configs/{face_detection → modules/face_detection}/BlazeFace-FPN-SSH.yaml +0 -0
- /paddlex/configs/{face_detection → modules/face_detection}/BlazeFace.yaml +0 -0
- /paddlex/configs/{face_detection → modules/face_detection}/PP-YOLOE_plus-S_face.yaml +0 -0
- /paddlex/configs/{face_detection → modules/face_detection}/PicoDet_LCNet_x2_5_face.yaml +0 -0
- /paddlex/configs/{human_detection → modules/human_detection}/PP-YOLOE-L_human.yaml +0 -0
- /paddlex/configs/{human_detection → modules/human_detection}/PP-YOLOE-S_human.yaml +0 -0
- /paddlex/configs/{anomaly_detection → modules/image_anomaly_detection}/STFPM.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ConvNeXt_base_224.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ConvNeXt_base_384.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ConvNeXt_large_224.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ConvNeXt_small.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ConvNeXt_tiny.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-L.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-M.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-S.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-T0.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-T1.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-T2.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV1_x0_25.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV1_x0_5.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV1_x0_75.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV1_x1_0.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV2_x0_25.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV2_x0_5.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV2_x1_0.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV2_x1_5.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV2_x2_0.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_large_x0_35.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_large_x0_5.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_large_x0_75.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_large_x1_0.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_large_x1_25.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_small_x0_35.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_small_x0_5.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_small_x0_75.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_small_x1_0.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_small_x1_25.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV4_conv_large.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV4_conv_medium.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV4_conv_small.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV4_hybrid_large.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV4_hybrid_medium.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B0.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B1.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B2.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B3.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B4.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B5.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B6.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNet_base.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNet_small.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNet_tiny.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNetV2_base.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNetV2_large.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNetV2_small.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x0_25.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x0_35.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x0_5.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x0_75.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x1_0.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x1_5.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x2_0.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x2_5.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet101.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet101_vd.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet152.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet152_vd.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet18.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet18_vd.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet200_vd.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet34.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet34_vd.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet50.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/ResNet50_vd.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/StarNet-S1.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/StarNet-S2.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/StarNet-S3.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/StarNet-S4.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_base_patch4_window12_384.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_base_patch4_window7_224.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_large_patch4_window12_384.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_large_patch4_window7_224.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_small_patch4_window7_224.yaml +0 -0
- /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_tiny_patch4_window7_224.yaml +0 -0
- /paddlex/configs/{general_recognition → modules/image_feature}/PP-ShiTuV2_rec.yaml +0 -0
- /paddlex/configs/{general_recognition → modules/image_feature}/PP-ShiTuV2_rec_CLIP_vit_base.yaml +0 -0
- /paddlex/configs/{general_recognition → modules/image_feature}/PP-ShiTuV2_rec_CLIP_vit_large.yaml +0 -0
- /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/CLIP_vit_base_patch16_448_ML.yaml +0 -0
- /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/PP-HGNetV2-B0_ML.yaml +0 -0
- /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/PP-HGNetV2-B4_ML.yaml +0 -0
- /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/PP-HGNetV2-B6_ML.yaml +0 -0
- /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/PP-LCNet_x1_0_ML.yaml +0 -0
- /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/ResNet50_ML.yaml +0 -0
- /paddlex/configs/{image_unwarping → modules/image_unwarping}/UVDoc.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Cascade-MaskRCNN-ResNet50-FPN.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Cascade-MaskRCNN-ResNet50-vd-SSLDv2-FPN.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Mask-RT-DETR-H.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Mask-RT-DETR-L.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Mask-RT-DETR-M.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Mask-RT-DETR-S.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Mask-RT-DETR-X.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNeXt101-vd-FPN.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNet101-FPN.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNet101-vd-FPN.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNet50-FPN.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNet50-vd-FPN.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNet50.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/PP-YOLOE_seg-S.yaml +0 -0
- /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/SOLOv2.yaml +0 -0
- /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet-L_layout_17cls.yaml +0 -0
- /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet-L_layout_3cls.yaml +0 -0
- /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet-S_layout_17cls.yaml +0 -0
- /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet-S_layout_3cls.yaml +0 -0
- /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet_layout_1x.yaml +0 -0
- /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet_layout_1x_table.yaml +0 -0
- /paddlex/configs/{structure_analysis → modules/layout_detection}/RT-DETR-H_layout_17cls.yaml +0 -0
- /paddlex/configs/{structure_analysis → modules/layout_detection}/RT-DETR-H_layout_3cls.yaml +0 -0
- /paddlex/configs/{mainbody_detection → modules/mainbody_detection}/PP-ShiTuV2_det.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/Cascade-FasterRCNN-ResNet50-FPN.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/Cascade-FasterRCNN-ResNet50-vd-SSLDv2-FPN.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/CenterNet-DLA-34.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/CenterNet-ResNet50.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/DETR-R50.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/FCOS-ResNet50.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNeXt101-vd-FPN.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet101-FPN.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet101.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet34-FPN.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet50-FPN.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet50-vd-FPN.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet50-vd-SSLDv2-FPN.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet50.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-Swin-Tiny-FPN.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/PP-YOLOE_plus-L.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/PP-YOLOE_plus-M.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/PP-YOLOE_plus-S.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/PP-YOLOE_plus-X.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/PicoDet-L.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/PicoDet-M.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/PicoDet-S.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/PicoDet-XS.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/RT-DETR-H.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/RT-DETR-L.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/RT-DETR-R18.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/RT-DETR-R50.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/RT-DETR-X.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/YOLOX-L.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/YOLOX-M.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/YOLOX-N.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/YOLOX-S.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/YOLOX-T.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/YOLOv3-DarkNet53.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/YOLOv3-MobileNetV3.yaml +0 -0
- /paddlex/configs/{object_detection → modules/object_detection}/YOLOv3-ResNet50_vd_DCN.yaml +0 -0
- /paddlex/configs/{pedestrian_attribute → modules/pedestrian_attribute_recognition}/PP-LCNet_x1_0_pedestrian_attribute.yaml +0 -0
- /paddlex/configs/{text_detection_seal → modules/seal_text_detection}/PP-OCRv4_mobile_seal_det.yaml +0 -0
- /paddlex/configs/{text_detection_seal → modules/seal_text_detection}/PP-OCRv4_server_seal_det.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/Deeplabv3-R101.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/Deeplabv3-R50.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/Deeplabv3_Plus-R101.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/Deeplabv3_Plus-R50.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/OCRNet_HRNet-W18.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/OCRNet_HRNet-W48.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/PP-LiteSeg-B.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/PP-LiteSeg-T.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B0.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B1.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B2.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B3.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B4.yaml +0 -0
- /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B5.yaml +0 -0
- /paddlex/configs/{small_object_detection → modules/small_object_detection}/PP-YOLOE_plus_SOD-L.yaml +0 -0
- /paddlex/configs/{small_object_detection → modules/small_object_detection}/PP-YOLOE_plus_SOD-S.yaml +0 -0
- /paddlex/configs/{small_object_detection → modules/small_object_detection}/PP-YOLOE_plus_SOD-largesize-L.yaml +0 -0
- /paddlex/configs/{table_recognition → modules/table_structure_recognition}/SLANet.yaml +0 -0
- /paddlex/configs/{table_recognition → modules/table_structure_recognition}/SLANet_plus.yaml +0 -0
- /paddlex/configs/{text_detection → modules/text_detection}/PP-OCRv4_mobile_det.yaml +0 -0
- /paddlex/configs/{text_detection → modules/text_detection}/PP-OCRv4_server_det.yaml +0 -0
- /paddlex/configs/{text_recognition → modules/text_recognition}/PP-OCRv4_mobile_rec.yaml +0 -0
- /paddlex/configs/{text_recognition → modules/text_recognition}/PP-OCRv4_server_rec.yaml +0 -0
- /paddlex/configs/{text_recognition → modules/text_recognition}/ch_RepSVTR_rec.yaml +0 -0
- /paddlex/configs/{text_recognition → modules/text_recognition}/ch_SVTRv2_rec.yaml +0 -0
- /paddlex/configs/{ts_anomaly_detection → modules/ts_anomaly_detection}/AutoEncoder_ad.yaml +0 -0
- /paddlex/configs/{ts_anomaly_detection → modules/ts_anomaly_detection}/DLinear_ad.yaml +0 -0
- /paddlex/configs/{ts_anomaly_detection → modules/ts_anomaly_detection}/Nonstationary_ad.yaml +0 -0
- /paddlex/configs/{ts_anomaly_detection → modules/ts_anomaly_detection}/PatchTST_ad.yaml +0 -0
- /paddlex/configs/{ts_anomaly_detection → modules/ts_anomaly_detection}/TimesNet_ad.yaml +0 -0
- /paddlex/configs/{ts_classification → modules/ts_classification}/TimesNet_cls.yaml +0 -0
- /paddlex/configs/{ts_forecast → modules/ts_forecast}/DLinear.yaml +0 -0
- /paddlex/configs/{ts_forecast → modules/ts_forecast}/NLinear.yaml +0 -0
- /paddlex/configs/{ts_forecast → modules/ts_forecast}/Nonstationary.yaml +0 -0
- /paddlex/configs/{ts_forecast → modules/ts_forecast}/PatchTST.yaml +0 -0
- /paddlex/configs/{ts_forecast → modules/ts_forecast}/RLinear.yaml +0 -0
- /paddlex/configs/{ts_forecast → modules/ts_forecast}/TiDE.yaml +0 -0
- /paddlex/configs/{ts_forecast → modules/ts_forecast}/TimesNet.yaml +0 -0
- /paddlex/configs/{vehicle_attribute → modules/vehicle_attribute_recognition}/PP-LCNet_x1_0_vehicle_attribute.yaml +0 -0
- /paddlex/configs/{vehicle_detection → modules/vehicle_detection}/PP-YOLOE-L_vehicle.yaml +0 -0
- /paddlex/configs/{vehicle_detection → modules/vehicle_detection}/PP-YOLOE-S_vehicle.yaml +0 -0
- /paddlex/inference/{results/utils → common}/__init__.py +0 -0
- {paddlex-3.0.0b2.dist-info → paddlex-3.0.0rc0.dist-info}/LICENSE +0 -0
- {paddlex-3.0.0b2.dist-info → paddlex-3.0.0rc0.dist-info}/entry_points.txt +0 -0
- {paddlex-3.0.0b2.dist-info → paddlex-3.0.0rc0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,792 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, sys
+from typing import Any, Dict, Optional, Union, List, Tuple
+import numpy as np
+import math
+import cv2
+from sklearn.cluster import KMeans
+from ..base import BasePipeline
+from ..components import CropByBoxes
+from .utils import get_neighbor_boxes_idx
+from .table_recognition_post_processing_v2 import get_table_recognition_res
+from .table_recognition_post_processing import (
+    get_table_recognition_res as get_table_recognition_res_e2e,
+)
+from .result import SingleTableRecognitionResult, TableRecognitionResult
+from ....utils import logging
+from ...utils.pp_option import PaddlePredictorOption
+from ...common.reader import ReadImage
+from ...common.batch_sampler import ImageBatchSampler
+from ..ocr.result import OCRResult
+from ..doc_preprocessor.result import DocPreprocessorResult
+
+from ...models.object_detection.result import DetResult
+
+
+class TableRecognitionPipelineV2(BasePipeline):
+    """Table Recognition Pipeline"""
+
+    entities = ["table_recognition_v2"]
+
+    def __init__(
+        self,
+        config: Dict,
+        device: str = None,
+        pp_option: PaddlePredictorOption = None,
+        use_hpip: bool = False,
+        hpi_params: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Initializes the layout parsing pipeline.
+
+        Args:
+            config (Dict): Configuration dictionary containing various settings.
+            device (str, optional): Device to run the predictions on. Defaults to None.
+            pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
+            use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
+            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
+        """
+
+        super().__init__(
+            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
+        )
+
+        self.use_doc_preprocessor = config.get("use_doc_preprocessor", True)
+        if self.use_doc_preprocessor:
+            doc_preprocessor_config = config.get("SubPipelines", {}).get(
+                "DocPreprocessor",
+                {
+                    "pipeline_config_error": "config error for doc_preprocessor_pipeline!"
+                },
+            )
+            self.doc_preprocessor_pipeline = self.create_pipeline(
+                doc_preprocessor_config
+            )
+
+        self.use_layout_detection = config.get("use_layout_detection", True)
+        if self.use_layout_detection:
+            layout_det_config = config.get("SubModules", {}).get(
+                "LayoutDetection",
+                {"model_config_error": "config error for layout_det_model!"},
+            )
+            self.layout_det_model = self.create_model(layout_det_config)
+
+        table_cls_config = config.get("SubModules", {}).get(
+            "TableClassification",
+            {"model_config_error": "config error for table_classification_model!"},
+        )
+        self.table_cls_model = self.create_model(table_cls_config)
+
+        wired_table_rec_config = config.get("SubModules", {}).get(
+            "WiredTableStructureRecognition",
+            {"model_config_error": "config error for wired_table_structure_model!"},
+        )
+        self.wired_table_rec_model = self.create_model(wired_table_rec_config)
+
+        wireless_table_rec_config = config.get("SubModules", {}).get(
+            "WirelessTableStructureRecognition",
+            {"model_config_error": "config error for wireless_table_structure_model!"},
+        )
+        self.wireless_table_rec_model = self.create_model(wireless_table_rec_config)
+
+        wired_table_cells_det_config = config.get("SubModules", {}).get(
+            "WiredTableCellsDetection",
+            {
+                "model_config_error": "config error for wired_table_cells_detection_model!"
+            },
+        )
+        self.wired_table_cells_detection_model = self.create_model(
+            wired_table_cells_det_config
+        )
+
+        wireless_table_cells_det_config = config.get("SubModules", {}).get(
+            "WirelessTableCellsDetection",
+            {
+                "model_config_error": "config error for wireless_table_cells_detection_model!"
+            },
+        )
+        self.wireless_table_cells_detection_model = self.create_model(
+            wireless_table_cells_det_config
+        )
+
+        self.use_ocr_model = config.get("use_ocr_model", True)
+        if self.use_ocr_model:
+            general_ocr_config = config.get("SubPipelines", {}).get(
+                "GeneralOCR",
+                {"pipeline_config_error": "config error for general_ocr_pipeline!"},
+            )
+            self.general_ocr_pipeline = self.create_pipeline(general_ocr_config)
+        else:
+            self.general_ocr_config_bak = config.get("SubPipelines", {}).get(
+                "GeneralOCR",
+                None
+            )
+
+        self._crop_by_boxes = CropByBoxes()
+
+        self.batch_sampler = ImageBatchSampler(batch_size=1)
+        self.img_reader = ReadImage(format="BGR")
+
+    def get_model_settings(
+        self,
+        use_doc_orientation_classify: Optional[bool],
+        use_doc_unwarping: Optional[bool],
+        use_layout_detection: Optional[bool],
+        use_ocr_model: Optional[bool],
+    ) -> dict:
+        """
+        Get the model settings based on the provided parameters or default values.
+
+        Args:
+            use_doc_orientation_classify (Optional[bool]): Whether to use document orientation classification.
+            use_doc_unwarping (Optional[bool]): Whether to use document unwarping.
+            use_layout_detection (Optional[bool]): Whether to use layout detection.
+            use_ocr_model (Optional[bool]): Whether to use OCR model.
+
+        Returns:
+            dict: A dictionary containing the model settings.
+        """
+        if use_doc_orientation_classify is None and use_doc_unwarping is None:
+            use_doc_preprocessor = self.use_doc_preprocessor
+        else:
+            if use_doc_orientation_classify is True or use_doc_unwarping is True:
+                use_doc_preprocessor = True
+            else:
+                use_doc_preprocessor = False
+
+        if use_layout_detection is None:
+            use_layout_detection = self.use_layout_detection
+
+        if use_ocr_model is None:
+            use_ocr_model = self.use_ocr_model
+
+        return dict(
+            use_doc_preprocessor=use_doc_preprocessor,
+            use_layout_detection=use_layout_detection,
+            use_ocr_model=use_ocr_model,
+        )
+
+    def check_model_settings_valid(
+        self,
+        model_settings: Dict,
+        overall_ocr_res: OCRResult,
+        layout_det_res: DetResult,
+    ) -> bool:
+        """
+        Check if the input parameters are valid based on the initialized models.
+
+        Args:
+            model_settings (Dict): A dictionary containing input parameters.
+            overall_ocr_res (OCRResult): Overall OCR result obtained after running the OCR pipeline.
+                The overall OCR result with convert_points_to_boxes information.
+            layout_det_res (DetResult): The layout detection result.
+        Returns:
+            bool: True if all required models are initialized according to input parameters, False otherwise.
+        """
+
+        if model_settings["use_doc_preprocessor"] and not self.use_doc_preprocessor:
+            logging.error(
+                "Set use_doc_preprocessor, but the models for doc preprocessor are not initialized."
+            )
+            return False
+
+        if model_settings["use_layout_detection"]:
+            if layout_det_res is not None:
+                logging.error(
+                    "The layout detection model has already been initialized, please set use_layout_detection=False"
+                )
+                return False
+
+            if not self.use_layout_detection:
+                logging.error(
+                    "Set use_layout_detection, but the models for layout detection are not initialized."
+                )
+                return False
+
+        if model_settings["use_ocr_model"]:
+            if overall_ocr_res is not None:
+                logging.error(
+                    "The OCR models have already been initialized, please set use_ocr_model=False"
+                )
+                return False
+
+            if not self.use_ocr_model:
+                logging.error(
+                    "Set use_ocr_model, but the models for OCR are not initialized."
+                )
+                return False
+        else:
+            if overall_ocr_res is None:
+                logging.error("Set use_ocr_model=False, but no OCR results were found.")
+                return False
+        return True
+
+    def predict_doc_preprocessor_res(
+        self, image_array: np.ndarray, input_params: dict
+    ) -> Tuple[DocPreprocessorResult, np.ndarray]:
+        """
+        Preprocess the document image based on input parameters.
+
+        Args:
+            image_array (np.ndarray): The input image array.
+            input_params (dict): Dictionary containing preprocessing parameters.
+
+        Returns:
+            tuple[DocPreprocessorResult, np.ndarray]: A tuple containing the preprocessing
+            result dictionary and the processed image array.
+        """
+        if input_params["use_doc_preprocessor"]:
+            use_doc_orientation_classify = input_params["use_doc_orientation_classify"]
+            use_doc_unwarping = input_params["use_doc_unwarping"]
+            doc_preprocessor_res = next(
+                self.doc_preprocessor_pipeline(
+                    image_array,
+                    use_doc_orientation_classify=use_doc_orientation_classify,
+                    use_doc_unwarping=use_doc_unwarping,
+                )
+            )
+            doc_preprocessor_image = doc_preprocessor_res["output_img"]
+        else:
+            doc_preprocessor_res = {}
+            doc_preprocessor_image = image_array
+        return doc_preprocessor_res, doc_preprocessor_image
+
+    def extract_results(self, pred, task):
+        if task == "cls":
+            return pred["label_names"][np.argmax(pred["scores"])]
+        elif task == "det":
+            threshold = 0.0
+            result = []
+            cell_score = []
+            if "boxes" in pred and isinstance(pred["boxes"], list):
+                for box in pred["boxes"]:
+                    if isinstance(box, dict) and "score" in box and "coordinate" in box:
+                        score = box["score"]
+                        coordinate = box["coordinate"]
+                        if isinstance(score, float) and score > threshold:
+                            result.append(coordinate)
+                            cell_score.append(score)
+            return result, cell_score
+        elif task == "table_stru":
+            return pred["structure"]
+        else:
+            return None
+
+    def cells_det_results_nms(
+        self, cells_det_results, cells_det_scores, cells_det_threshold=0.3
+    ):
+        """
+        Apply Non-Maximum Suppression (NMS) on detection results to remove redundant overlapping bounding boxes.
+
+        Args:
+            cells_det_results (list): List of bounding boxes, each box is in format [x1, y1, x2, y2].
+            cells_det_scores (list): List of confidence scores corresponding to the bounding boxes.
+            cells_det_threshold (float): IoU threshold for suppression. Boxes with IoU greater than this threshold
+                will be suppressed. Default is 0.5.
+
+        Returns:
+            Tuple[list, list]: A tuple containing the list of bounding boxes and confidence scores after NMS,
+                while maintaining one-to-one correspondence.
+        """
+        # Convert lists to numpy arrays for efficient computation
+        boxes = np.array(cells_det_results)
+        scores = np.array(cells_det_scores)
+        # Initialize list for picked indices
+        picked_indices = []
+        # Get coordinates of bounding boxes
+        x1 = boxes[:, 0]
+        y1 = boxes[:, 1]
+        x2 = boxes[:, 2]
+        y2 = boxes[:, 3]
+        # Compute the area of the bounding boxes
+        areas = (x2 - x1) * (y2 - y1)
+        # Sort the bounding boxes by the confidence scores in descending order
+        order = scores.argsort()[::-1]
+        # Process the boxes
+        while order.size > 0:
+            # Index of the current highest score box
+            i = order[0]
+            picked_indices.append(i)
+            # Compute IoU between the highest score box and the rest
+            xx1 = np.maximum(x1[i], x1[order[1:]])
+            yy1 = np.maximum(y1[i], y1[order[1:]])
+            xx2 = np.minimum(x2[i], x2[order[1:]])
+            yy2 = np.minimum(y2[i], y2[order[1:]])
+            # Compute the width and height of the overlapping area
+            w = np.maximum(0.0, xx2 - xx1)
+            h = np.maximum(0.0, yy2 - yy1)
+            # Compute the ratio of overlap (IoU)
+            inter = w * h
+            ovr = inter / (areas[i] + areas[order[1:]] - inter)
+            # Indices of boxes with IoU less than threshold
+            inds = np.where(ovr <= cells_det_threshold)[0]
+            # Update order, only keep boxes with IoU less than threshold
+            order = order[
+                inds + 1
+            ]  # inds shifted by 1 because order[0] is the current box
+        # Select the boxes and scores based on picked indices
+        final_boxes = boxes[picked_indices].tolist()
+        final_scores = scores[picked_indices].tolist()
+        return final_boxes, final_scores
+
+    def get_region_ocr_det_boxes(self, ocr_det_boxes, table_box):
+        """Adjust the coordinates of ocr_det_boxes that are fully inside table_box relative to table_box.
+
+        Args:
+            ocr_det_boxes (list of list): List of bounding boxes [x1, y1, x2, y2] in the original image.
+            table_box (list): Bounding box [x1, y1, x2, y2] of the target region in the original image.
+
+        Returns:
+            list of list: List of adjusted bounding boxes relative to table_box, for boxes fully inside table_box.
+        """
+        tol = 0
+        # Extract coordinates from table_box
+        x_min_t, y_min_t, x_max_t, y_max_t = table_box
+        adjusted_boxes = []
+        for box in ocr_det_boxes:
+            x_min_b, y_min_b, x_max_b, y_max_b = box
+            # Check if the box is fully inside table_box
+            if (
+                x_min_b + tol >= x_min_t
+                and y_min_b + tol >= y_min_t
+                and x_max_b - tol <= x_max_t
+                and y_max_b - tol <= y_max_t
+            ):
+                # Adjust the coordinates to be relative to table_box
+                adjusted_box = [
+                    x_min_b - x_min_t,  # Adjust x1
+                    y_min_b - y_min_t,  # Adjust y1
+                    x_max_b - x_min_t,  # Adjust x2
+                    y_max_b - y_min_t,  # Adjust y2
+                ]
+                adjusted_boxes.append(adjusted_box)
+            # Discard boxes not fully inside table_box
+        return adjusted_boxes
+
+    def cells_det_results_reprocessing(
+        self, cells_det_results, cells_det_scores, ocr_det_results, html_pred_boxes_nums
+    ):
+        """
+        Process and filter cells_det_results based on ocr_det_results and html_pred_boxes_nums.
+
+        Args:
+            cells_det_results (List[List[float]]): List of detected cell rectangles [[x1, y1, x2, y2], ...].
+            cells_det_scores (List[float]): List of confidence scores for each rectangle in cells_det_results.
+            ocr_det_results (List[List[float]]): List of OCR detected rectangles [[x1, y1, x2, y2], ...].
+            html_pred_boxes_nums (int): The desired number of rectangles in the final output.
+
+        Returns:
+            List[List[float]]: The processed list of rectangles.
+        """
+
+        # Function to compute IoU between two rectangles
+        def compute_iou(box1, box2):
+            """
+            Compute the Intersection over Union (IoU) between two rectangles.
+
+            Args:
+                box1 (array-like): [x1, y1, x2, y2] of the first rectangle.
+                box2 (array-like): [x1, y1, x2, y2] of the second rectangle.
+
+            Returns:
+                float: The IoU between the two rectangles.
+            """
+            # Determine the coordinates of the intersection rectangle
+            x_left = max(box1[0], box2[0])
+            y_top = max(box1[1], box2[1])
+            x_right = min(box1[2], box2[2])
+            y_bottom = min(box1[3], box2[3])
+            if x_right <= x_left or y_bottom <= y_top:
+                return 0.0
+            # Calculate the area of intersection rectangle
+            intersection_area = (x_right - x_left) * (y_bottom - y_top)
+            # Calculate the area of both rectangles
+            box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
+            box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
+            # Calculate the IoU
+            iou = intersection_area / float(box1_area)
+            return iou
+
+        # Function to combine rectangles into N rectangles
+        def combine_rectangles(rectangles, N):
+            """
+            Combine rectangles into N rectangles based on geometric proximity.
+
+            Args:
+                rectangles (list of list of int): A list of rectangles, each represented by [x1, y1, x2, y2].
+                N (int): The desired number of combined rectangles.
+
+            Returns:
+                list of list of int: A list of N combined rectangles.
+            """
+            # Number of input rectangles
+            num_rects = len(rectangles)
+            # If N is greater than or equal to the number of rectangles, return the original rectangles
+            if N >= num_rects:
+                return rectangles
+            # Compute the center points of the rectangles
+            centers = np.array(
+                [
+                    [
+                        (rect[0] + rect[2]) / 2,  # Center x-coordinate
+                        (rect[1] + rect[3]) / 2,  # Center y-coordinate
+                    ]
+                    for rect in rectangles
+                ]
+            )
+            # Perform KMeans clustering on the center points to group them into N clusters
+            kmeans = KMeans(n_clusters=N, random_state=0, n_init="auto")
+            labels = kmeans.fit_predict(centers)
+            # Initialize a list to store the combined rectangles
+            combined_rectangles = []
+            # For each cluster, compute the minimal bounding rectangle that covers all rectangles in the cluster
+            for i in range(N):
+                # Get the indices of rectangles that belong to cluster i
+                indices = np.where(labels == i)[0]
+                if len(indices) == 0:
+                    # If no rectangles in this cluster, skip it
+                    continue
+                # Extract the rectangles in cluster i
+                cluster_rects = np.array([rectangles[idx] for idx in indices])
+                # Compute the minimal x1, y1 (top-left corner) and maximal x2, y2 (bottom-right corner)
+                x1_min = np.min(cluster_rects[:, 0])
+                y1_min = np.min(cluster_rects[:, 1])
+                x2_max = np.max(cluster_rects[:, 2])
+                y2_max = np.max(cluster_rects[:, 3])
+                # Append the combined rectangle to the list
+                combined_rectangles.append([x1_min, y1_min, x2_max, y2_max])
+            return combined_rectangles
+
+        # Ensure that the inputs are numpy arrays for efficient computation
+        cells_det_results = np.array(cells_det_results)
+        cells_det_scores = np.array(cells_det_scores)
+        ocr_det_results = np.array(ocr_det_results)
+        more_cells_flag = False
+        if len(cells_det_results) == html_pred_boxes_nums:
+            return cells_det_results
+        # Step 1: If cells_det_results has more rectangles than html_pred_boxes_nums
+        elif len(cells_det_results) > html_pred_boxes_nums:
+            more_cells_flag = True
+            # Select the indices of the top html_pred_boxes_nums scores
+            top_indices = np.argsort(-cells_det_scores)[:html_pred_boxes_nums]
+            # Adjust the corresponding rectangles
+            cells_det_results = cells_det_results[top_indices].tolist()
+        # Threshold for IoU
+        iou_threshold = 0.6
+        # List to store ocr_miss_boxes
+        ocr_miss_boxes = []
+        # For each rectangle in ocr_det_results
+        for ocr_rect in ocr_det_results:
+            merge_ocr_box_iou = []
+            # Flag to indicate if ocr_rect has IoU >= threshold with any cell_rect
+            has_large_iou = False
+            # For each rectangle in cells_det_results
+            for cell_rect in cells_det_results:
+                # Compute IoU
+                iou = compute_iou(ocr_rect, cell_rect)
+                if iou > 0:
+                    merge_ocr_box_iou.append(iou)
+                if (iou >= iou_threshold) or (sum(merge_ocr_box_iou) >= iou_threshold):
+                    has_large_iou = True
+                    break
+            if not has_large_iou:
+                ocr_miss_boxes.append(ocr_rect)
+        # If no ocr_miss_boxes, return cells_det_results
+        if len(ocr_miss_boxes) == 0:
+            final_results = (
+                cells_det_results
+                if more_cells_flag == True
+                else cells_det_results.tolist()
+            )
+        else:
+            if more_cells_flag == True:
+                final_results = combine_rectangles(
+                    cells_det_results + ocr_miss_boxes, html_pred_boxes_nums
+                )
+            else:
+                # Need to combine ocr_miss_boxes into N rectangles
+                N = html_pred_boxes_nums - len(cells_det_results)
+                # Combine ocr_miss_boxes into N rectangles
+                ocr_supp_boxes = combine_rectangles(ocr_miss_boxes, N)
+                # Combine cells_det_results and ocr_supp_boxes
+                final_results = np.concatenate(
+                    (cells_det_results, ocr_supp_boxes), axis=0
+                ).tolist()
+        if len(final_results) <= 0.6 * html_pred_boxes_nums:
+            final_results = combine_rectangles(ocr_det_results, html_pred_boxes_nums)
+        return final_results
+
+    def split_ocr_bboxes_by_table_cells(self, ori_img, cells_bboxes):
+        """
+        Splits OCR bounding boxes by table cells and retrieves text.
+
+        Args:
+            ori_img (ndarray): The original image from which text regions will be extracted.
+            cells_bboxes (list or ndarray): Detected cell bounding boxes to extract text from.
+
+        Returns:
+            list: A list containing the recognized texts from each cell.
+        """
+
+        # Check if cells_bboxes is a list and convert it if not.
+        if not isinstance(cells_bboxes, list):
+            cells_bboxes = cells_bboxes.tolist()
+        texts_list = []  # Initialize a list to store the recognized texts.
+        # Process each bounding box provided in cells_bboxes.
+        for i in range(len(cells_bboxes)):
+            # Extract and round up the coordinates of the bounding box.
+            x1, y1, x2, y2 = [math.ceil(k) for k in cells_bboxes[i]]
+            # Perform OCR on the defined region of the image and get the recognized text.
+            rec_te = next(self.general_ocr_pipeline(ori_img[y1:y2, x1:x2, :]))
+            # Concatenate the texts and append them to the texts_list.
+            texts_list.append("".join(rec_te["rec_texts"]))
+        # Return the list of recognized texts from each cell.
+        return texts_list
+
+    def predict_single_table_recognition_res(
+        self,
+        image_array: np.ndarray,
+        overall_ocr_res: OCRResult,
+        table_box: list,
+        use_table_cells_ocr_results: bool = False,
+        use_e2e_wired_table_rec_model: bool = False,
+        use_e2e_wireless_table_rec_model: bool = False,
+        flag_find_nei_text: bool = True,
+    ) -> SingleTableRecognitionResult:
+        """
+        Predict table recognition results from an image array, layout detection results, and OCR results.
+
+        Args:
+            image_array (np.ndarray): The input image represented as a numpy array.
+            overall_ocr_res (OCRResult): Overall OCR result obtained after running the OCR pipeline.
+                The overall OCR results containing text recognition information.
+            table_box (list): The table box coordinates.
+            use_table_cells_ocr_results (bool): whether to use OCR results with cells.
+            use_e2e_wired_table_rec_model (bool): Whether to use end-to-end wired table recognition model.
+            use_e2e_wireless_table_rec_model (bool): Whether to use end-to-end wireless table recognition model.
+            flag_find_nei_text (bool): Whether to find neighboring text.
+        Returns:
+            SingleTableRecognitionResult: single table recognition result.
+        """
+
+        table_cls_pred = next(self.table_cls_model(image_array))
+        table_cls_result = self.extract_results(table_cls_pred, "cls")
+        use_e2e_model = False
+
+        if table_cls_result == "wired_table":
+            table_structure_pred = next(self.wired_table_rec_model(image_array))
+            if use_e2e_wired_table_rec_model == True:
+                use_e2e_model = True
+            else:
+                table_cells_pred = next(
+                    self.wired_table_cells_detection_model(image_array, threshold=0.3)
+                )  # Setting the threshold to 0.3 can improve the accuracy of table cells detection.
+                # If you really want more or fewer table cells detection boxes, the threshold can be adjusted.
+        elif table_cls_result == "wireless_table":
+            table_structure_pred = next(self.wireless_table_rec_model(image_array))
+            if use_e2e_wireless_table_rec_model == True:
+                use_e2e_model = True
+            else:
+                table_cells_pred = next(
+                    self.wireless_table_cells_detection_model(image_array, threshold=0.3)
+                )  # Setting the threshold to 0.3 can improve the accuracy of table cells detection.
+                # If you really want more or fewer table cells detection boxes, the threshold can be adjusted.
+
+        if use_e2e_model == False:
+            table_structure_result = self.extract_results(table_structure_pred, "table_stru")
+            table_cells_result, table_cells_score = self.extract_results(table_cells_pred, "det")
+            table_cells_result, table_cells_score = self.cells_det_results_nms(table_cells_result, table_cells_score)
+            ocr_det_boxes = self.get_region_ocr_det_boxes(overall_ocr_res["rec_boxes"].tolist(), table_box)
+            table_cells_result = self.cells_det_results_reprocessing(
+                table_cells_result,
+                table_cells_score,
+                ocr_det_boxes,
+                len(table_structure_pred["bbox"]),
+            )
+            if use_table_cells_ocr_results == True:
+                cells_texts_list = self.split_ocr_bboxes_by_table_cells(image_array, table_cells_result)
+            else:
+                cells_texts_list = []
+            single_table_recognition_res = get_table_recognition_res(
+                table_box,
+                table_structure_result,
+                table_cells_result,
+                overall_ocr_res,
+                cells_texts_list,
+                use_table_cells_ocr_results,
+            )
+        else:
+            if use_table_cells_ocr_results == True:
+                table_cells_result_e2e = list(map(lambda arr: arr.tolist(), table_structure_pred["bbox"]))
+                table_cells_result_e2e = [[rect[0], rect[1], rect[4], rect[5]] for rect in table_cells_result_e2e]
+                cells_texts_list = self.split_ocr_bboxes_by_table_cells(image_array, table_cells_result_e2e)
+            else:
+                cells_texts_list = []
+            single_table_recognition_res = get_table_recognition_res_e2e(
+                table_box,
+                table_structure_pred,
+                overall_ocr_res,
+                cells_texts_list,
+                use_table_cells_ocr_results,
+            )
+
+        neighbor_text = ""
+        if flag_find_nei_text:
+            match_idx_list = get_neighbor_boxes_idx(
+                overall_ocr_res["rec_boxes"], table_box
+            )
+            if len(match_idx_list) > 0:
+                for idx in match_idx_list:
+                    neighbor_text += overall_ocr_res["rec_texts"][idx] + "; "
+        single_table_recognition_res["neighbor_texts"] = neighbor_text
+        return single_table_recognition_res
+
+    def predict(
+        self,
+        input: Union[str, List[str], np.ndarray, List[np.ndarray]],
+        use_doc_orientation_classify: Optional[bool] = None,
+        use_doc_unwarping: Optional[bool] = None,
+        use_layout_detection: Optional[bool] = None,
+        use_ocr_model: Optional[bool] = None,
+        overall_ocr_res: Optional[OCRResult] = None,
+        layout_det_res: Optional[DetResult] = None,
+        text_det_limit_side_len: Optional[int] = None,
+        text_det_limit_type: Optional[str] = None,
+        text_det_thresh: Optional[float] = None,
+        text_det_box_thresh: Optional[float] = None,
+        text_det_unclip_ratio: Optional[float] = None,
+        text_rec_score_thresh: Optional[float] = None,
+        use_table_cells_ocr_results: bool = False,
+        use_e2e_wired_table_rec_model: bool = False,
+        use_e2e_wireless_table_rec_model: bool = False,
+        **kwargs,
+    ) -> TableRecognitionResult:
+        """
+        This function predicts the layout parsing result for the given input.
+
+        Args:
+            input (Union[str, list[str], np.ndarray, list[np.ndarray]]): The input image(s) of pdf(s) to be processed.
+            use_layout_detection (bool): Whether to use layout detection.
+            use_doc_orientation_classify (bool): Whether to use document orientation classification.
+            use_doc_unwarping (bool): Whether to use document unwarping.
+            overall_ocr_res (OCRResult): The overall OCR result with convert_points_to_boxes information.
+                It will be used if it is not None and use_ocr_model is False.
+            layout_det_res (DetResult): The layout detection result.
+                It will be used if it is not None and use_layout_detection is False.
+            use_table_cells_ocr_results (bool): whether to use OCR results with cells.
+            use_e2e_wired_table_rec_model (bool): Whether to use end-to-end wired table recognition model.
+            use_e2e_wireless_table_rec_model (bool): Whether to use end-to-end wireless table recognition model.
+            flag_find_nei_text (bool): Whether to find neighboring text.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            TableRecognitionResult: The predicted table recognition result.
+        """
+
+        model_settings = self.get_model_settings(
+            use_doc_orientation_classify,
+            use_doc_unwarping,
+            use_layout_detection,
+            use_ocr_model,
+        )
+
+        if not self.check_model_settings_valid(
+            model_settings, overall_ocr_res, layout_det_res
+        ):
+            yield {"error": "the input params for model settings are invalid!"}
+
+        for img_id, batch_data in enumerate(self.batch_sampler(input)):
+            image_array = self.img_reader(batch_data.instances)[0]
+
+            if model_settings["use_doc_preprocessor"]:
+                doc_preprocessor_res = next(
+                    self.doc_preprocessor_pipeline(
+                        image_array,
+                        use_doc_orientation_classify=use_doc_orientation_classify,
+                        use_doc_unwarping=use_doc_unwarping,
+                    )
+                )
+            else:
+                doc_preprocessor_res = {"output_img": image_array}
+
+            doc_preprocessor_image = doc_preprocessor_res["output_img"]
+
+            if model_settings["use_ocr_model"]:
+                overall_ocr_res = next(
+                    self.general_ocr_pipeline(
+                        doc_preprocessor_image,
+                        text_det_limit_side_len=text_det_limit_side_len,
+                        text_det_limit_type=text_det_limit_type,
+                        text_det_thresh=text_det_thresh,
+                        text_det_box_thresh=text_det_box_thresh,
+                        text_det_unclip_ratio=text_det_unclip_ratio,
+                        text_rec_score_thresh=text_rec_score_thresh,
+                    )
+                )
+            elif use_table_cells_ocr_results == True:
+                assert self.general_ocr_config_bak != None
+                self.general_ocr_pipeline = self.create_pipeline(self.general_ocr_config_bak)
+
+            table_res_list = []
+            table_region_id = 1
+            if not model_settings["use_layout_detection"] and layout_det_res is None:
+                layout_det_res = {}
+                img_height, img_width = doc_preprocessor_image.shape[:2]
+                table_box = [0, 0, img_width - 1, img_height - 1]
+                single_table_rec_res = self.predict_single_table_recognition_res(
+                    doc_preprocessor_image,
+                    overall_ocr_res,
+                    table_box,
+                    use_table_cells_ocr_results,
+                    use_e2e_wired_table_rec_model,
+                    use_e2e_wireless_table_rec_model,
+                    flag_find_nei_text=False,
+                )
+                single_table_rec_res["table_region_id"] = table_region_id
+                table_res_list.append(single_table_rec_res)
+                table_region_id += 1
+            else:
+                if model_settings["use_layout_detection"]:
+                    layout_det_res = next(self.layout_det_model(doc_preprocessor_image))
+
+                for box_info in layout_det_res["boxes"]:
+                    if box_info["label"].lower() in ["table"]:
+                        crop_img_info = self._crop_by_boxes(image_array, [box_info])
+                        crop_img_info = crop_img_info[0]
+                        table_box = crop_img_info["box"]
+                        single_table_rec_res = (
+                            self.predict_single_table_recognition_res(
+                                crop_img_info["img"],
+                                overall_ocr_res,
+                                table_box,
+                                use_table_cells_ocr_results,
+                                use_e2e_wired_table_rec_model,
+                                use_e2e_wireless_table_rec_model,
+                            )
+                        )
+                        single_table_rec_res["table_region_id"] = table_region_id
+                        table_res_list.append(single_table_rec_res)
+                        table_region_id += 1
+
+            single_img_res = {
+                "input_path": batch_data.input_paths[0],
+                "page_index": batch_data.page_indexes[0],
+                "doc_preprocessor_res": doc_preprocessor_res,
+                "layout_det_res": layout_det_res,
+                "overall_ocr_res": overall_ocr_res,
+                "table_res_list": table_res_list,
+                "model_settings": model_settings,
+            }
+            yield TableRecognitionResult(single_img_res)