paddlex 3.0.0rc1__py3-none-any.whl → 3.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- paddlex/.version +1 -1
- paddlex/__init__.py +1 -1
- paddlex/configs/modules/chart_parsing/PP-Chart2Table.yaml +13 -0
- paddlex/configs/modules/doc_vlm/PP-DocBee2-3B.yaml +14 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet_plus-L.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet_plus-M.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet_plus-S.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocBlockLayout.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocLayout-L.yaml +2 -2
- paddlex/configs/modules/layout_detection/PP-DocLayout-M.yaml +2 -2
- paddlex/configs/modules/layout_detection/PP-DocLayout-S.yaml +2 -2
- paddlex/configs/modules/layout_detection/PP-DocLayout_plus-L.yaml +40 -0
- paddlex/configs/modules/text_detection/PP-OCRv5_mobile_det.yaml +40 -0
- paddlex/configs/modules/text_detection/PP-OCRv5_server_det.yaml +40 -0
- paddlex/configs/modules/text_recognition/PP-OCRv5_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/PP-OCRv5_server_rec.yaml +39 -0
- paddlex/configs/modules/textline_orientation/PP-LCNet_x1_0_textline_ori.yaml +41 -0
- paddlex/configs/pipelines/OCR.yaml +7 -6
- paddlex/configs/pipelines/PP-ChatOCRv3-doc.yaml +3 -1
- paddlex/configs/pipelines/PP-ChatOCRv4-doc.yaml +91 -34
- paddlex/configs/pipelines/PP-StructureV3.yaml +72 -72
- paddlex/configs/pipelines/doc_understanding.yaml +1 -1
- paddlex/configs/pipelines/formula_recognition.yaml +2 -2
- paddlex/configs/pipelines/layout_parsing.yaml +3 -2
- paddlex/configs/pipelines/seal_recognition.yaml +1 -0
- paddlex/configs/pipelines/table_recognition.yaml +2 -1
- paddlex/configs/pipelines/table_recognition_v2.yaml +7 -1
- paddlex/hpip_links.html +20 -20
- paddlex/inference/common/batch_sampler/doc_vlm_batch_sampler.py +33 -10
- paddlex/inference/common/batch_sampler/image_batch_sampler.py +34 -25
- paddlex/inference/common/result/mixin.py +19 -12
- paddlex/inference/models/base/predictor/base_predictor.py +2 -8
- paddlex/inference/models/common/static_infer.py +29 -73
- paddlex/inference/models/common/tokenizer/__init__.py +2 -0
- paddlex/inference/models/common/tokenizer/clip_tokenizer.py +1 -1
- paddlex/inference/models/common/tokenizer/gpt_tokenizer.py +2 -2
- paddlex/inference/models/common/tokenizer/qwen2_5_tokenizer.py +112 -0
- paddlex/inference/models/common/tokenizer/qwen2_tokenizer.py +7 -1
- paddlex/inference/models/common/tokenizer/qwen_tokenizer.py +288 -0
- paddlex/inference/models/common/tokenizer/tokenizer_utils.py +13 -13
- paddlex/inference/models/common/tokenizer/tokenizer_utils_base.py +3 -3
- paddlex/inference/models/common/tokenizer/vocab.py +7 -7
- paddlex/inference/models/common/ts/funcs.py +19 -8
- paddlex/inference/models/common/vlm/conversion_utils.py +99 -0
- paddlex/inference/models/common/vlm/fusion_ops.py +205 -0
- paddlex/inference/models/common/vlm/generation/configuration_utils.py +1 -1
- paddlex/inference/models/common/vlm/generation/logits_process.py +1 -1
- paddlex/inference/models/common/vlm/generation/utils.py +1 -1
- paddlex/inference/models/common/vlm/transformers/configuration_utils.py +3 -3
- paddlex/inference/models/common/vlm/transformers/conversion_utils.py +3 -3
- paddlex/inference/models/common/vlm/transformers/model_outputs.py +2 -2
- paddlex/inference/models/common/vlm/transformers/model_utils.py +7 -31
- paddlex/inference/models/doc_vlm/modeling/GOT_ocr_2_0.py +830 -0
- paddlex/inference/models/doc_vlm/modeling/__init__.py +2 -0
- paddlex/inference/models/doc_vlm/modeling/qwen2.py +1606 -0
- paddlex/inference/models/doc_vlm/modeling/qwen2_5_vl.py +3006 -0
- paddlex/inference/models/doc_vlm/modeling/qwen2_vl.py +0 -105
- paddlex/inference/models/doc_vlm/predictor.py +79 -24
- paddlex/inference/models/doc_vlm/processors/GOT_ocr_2_0.py +97 -0
- paddlex/inference/models/doc_vlm/processors/__init__.py +2 -0
- paddlex/inference/models/doc_vlm/processors/common.py +189 -0
- paddlex/inference/models/doc_vlm/processors/qwen2_5_vl.py +548 -0
- paddlex/inference/models/doc_vlm/processors/qwen2_vl.py +21 -176
- paddlex/inference/models/formula_recognition/predictor.py +8 -2
- paddlex/inference/models/formula_recognition/processors.py +90 -77
- paddlex/inference/models/formula_recognition/result.py +28 -27
- paddlex/inference/models/image_feature/processors.py +3 -4
- paddlex/inference/models/keypoint_detection/predictor.py +3 -0
- paddlex/inference/models/object_detection/predictor.py +2 -0
- paddlex/inference/models/object_detection/processors.py +28 -3
- paddlex/inference/models/object_detection/utils.py +2 -0
- paddlex/inference/models/table_structure_recognition/result.py +0 -10
- paddlex/inference/models/text_detection/predictor.py +8 -0
- paddlex/inference/models/text_detection/processors.py +44 -10
- paddlex/inference/models/text_detection/result.py +0 -10
- paddlex/inference/models/text_recognition/result.py +1 -1
- paddlex/inference/pipelines/__init__.py +9 -5
- paddlex/inference/pipelines/_parallel.py +172 -0
- paddlex/inference/pipelines/anomaly_detection/pipeline.py +16 -6
- paddlex/inference/pipelines/attribute_recognition/pipeline.py +11 -1
- paddlex/inference/pipelines/base.py +14 -4
- paddlex/inference/pipelines/components/faisser.py +1 -1
- paddlex/inference/pipelines/doc_preprocessor/pipeline.py +53 -27
- paddlex/inference/pipelines/formula_recognition/pipeline.py +120 -82
- paddlex/inference/pipelines/formula_recognition/result.py +1 -11
- paddlex/inference/pipelines/image_classification/pipeline.py +16 -6
- paddlex/inference/pipelines/image_multilabel_classification/pipeline.py +16 -6
- paddlex/inference/pipelines/instance_segmentation/pipeline.py +16 -6
- paddlex/inference/pipelines/keypoint_detection/pipeline.py +16 -6
- paddlex/inference/pipelines/layout_parsing/layout_objects.py +859 -0
- paddlex/inference/pipelines/layout_parsing/pipeline.py +34 -47
- paddlex/inference/pipelines/layout_parsing/pipeline_v2.py +832 -260
- paddlex/inference/pipelines/layout_parsing/result.py +4 -17
- paddlex/inference/pipelines/layout_parsing/result_v2.py +259 -245
- paddlex/inference/pipelines/layout_parsing/setting.py +88 -0
- paddlex/inference/pipelines/layout_parsing/utils.py +391 -2028
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/__init__.py +16 -0
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/utils.py +1199 -0
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/xycuts.py +615 -0
- paddlex/inference/pipelines/m_3d_bev_detection/pipeline.py +2 -2
- paddlex/inference/pipelines/multilingual_speech_recognition/pipeline.py +2 -2
- paddlex/inference/pipelines/object_detection/pipeline.py +16 -6
- paddlex/inference/pipelines/ocr/pipeline.py +127 -70
- paddlex/inference/pipelines/ocr/result.py +21 -18
- paddlex/inference/pipelines/open_vocabulary_detection/pipeline.py +2 -2
- paddlex/inference/pipelines/open_vocabulary_segmentation/pipeline.py +2 -2
- paddlex/inference/pipelines/pp_chatocr/pipeline_base.py +2 -2
- paddlex/inference/pipelines/pp_chatocr/pipeline_v3.py +2 -5
- paddlex/inference/pipelines/pp_chatocr/pipeline_v4.py +6 -6
- paddlex/inference/pipelines/rotated_object_detection/pipeline.py +16 -6
- paddlex/inference/pipelines/seal_recognition/pipeline.py +109 -53
- paddlex/inference/pipelines/semantic_segmentation/pipeline.py +16 -6
- paddlex/inference/pipelines/small_object_detection/pipeline.py +16 -6
- paddlex/inference/pipelines/table_recognition/pipeline.py +26 -18
- paddlex/inference/pipelines/table_recognition/pipeline_v2.py +624 -53
- paddlex/inference/pipelines/table_recognition/result.py +1 -1
- paddlex/inference/pipelines/table_recognition/table_recognition_post_processing_v2.py +9 -5
- paddlex/inference/pipelines/ts_anomaly_detection/pipeline.py +2 -2
- paddlex/inference/pipelines/ts_classification/pipeline.py +2 -2
- paddlex/inference/pipelines/ts_forecasting/pipeline.py +2 -2
- paddlex/inference/pipelines/video_classification/pipeline.py +2 -2
- paddlex/inference/pipelines/video_detection/pipeline.py +2 -2
- paddlex/inference/serving/basic_serving/_app.py +46 -13
- paddlex/inference/serving/basic_serving/_pipeline_apps/_common/common.py +5 -1
- paddlex/inference/serving/basic_serving/_pipeline_apps/layout_parsing.py +0 -1
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv3_doc.py +0 -1
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv4_doc.py +1 -1
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_structurev3.py +6 -2
- paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition.py +1 -5
- paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition_v2.py +4 -5
- paddlex/inference/serving/infra/utils.py +20 -22
- paddlex/inference/serving/schemas/formula_recognition.py +1 -1
- paddlex/inference/serving/schemas/layout_parsing.py +1 -2
- paddlex/inference/serving/schemas/pp_chatocrv3_doc.py +1 -2
- paddlex/inference/serving/schemas/pp_chatocrv4_doc.py +2 -2
- paddlex/inference/serving/schemas/pp_structurev3.py +10 -6
- paddlex/inference/serving/schemas/seal_recognition.py +1 -1
- paddlex/inference/serving/schemas/table_recognition.py +2 -6
- paddlex/inference/serving/schemas/table_recognition_v2.py +5 -6
- paddlex/inference/utils/hpi.py +30 -16
- paddlex/inference/utils/hpi_model_info_collection.json +666 -162
- paddlex/inference/utils/io/readers.py +12 -12
- paddlex/inference/utils/misc.py +20 -0
- paddlex/inference/utils/mkldnn_blocklist.py +59 -0
- paddlex/inference/utils/official_models.py +140 -5
- paddlex/inference/utils/pp_option.py +74 -9
- paddlex/model.py +2 -2
- paddlex/modules/__init__.py +1 -1
- paddlex/modules/anomaly_detection/evaluator.py +2 -2
- paddlex/modules/base/__init__.py +1 -1
- paddlex/modules/base/evaluator.py +5 -5
- paddlex/modules/base/trainer.py +1 -1
- paddlex/modules/doc_vlm/dataset_checker.py +2 -2
- paddlex/modules/doc_vlm/evaluator.py +2 -2
- paddlex/modules/doc_vlm/exportor.py +2 -2
- paddlex/modules/doc_vlm/model_list.py +1 -1
- paddlex/modules/doc_vlm/trainer.py +2 -2
- paddlex/modules/face_recognition/evaluator.py +2 -2
- paddlex/modules/formula_recognition/evaluator.py +5 -2
- paddlex/modules/formula_recognition/model_list.py +3 -0
- paddlex/modules/formula_recognition/trainer.py +3 -0
- paddlex/modules/general_recognition/evaluator.py +1 -1
- paddlex/modules/image_classification/evaluator.py +2 -2
- paddlex/modules/image_classification/model_list.py +1 -0
- paddlex/modules/instance_segmentation/evaluator.py +1 -1
- paddlex/modules/keypoint_detection/evaluator.py +1 -1
- paddlex/modules/m_3d_bev_detection/evaluator.py +2 -2
- paddlex/modules/multilabel_classification/evaluator.py +2 -2
- paddlex/modules/object_detection/dataset_checker/dataset_src/convert_dataset.py +4 -4
- paddlex/modules/object_detection/evaluator.py +2 -2
- paddlex/modules/object_detection/model_list.py +2 -0
- paddlex/modules/semantic_segmentation/dataset_checker/__init__.py +12 -2
- paddlex/modules/semantic_segmentation/evaluator.py +2 -2
- paddlex/modules/table_recognition/evaluator.py +2 -2
- paddlex/modules/text_detection/evaluator.py +2 -2
- paddlex/modules/text_detection/model_list.py +2 -0
- paddlex/modules/text_recognition/evaluator.py +2 -2
- paddlex/modules/text_recognition/model_list.py +2 -0
- paddlex/modules/ts_anomaly_detection/evaluator.py +2 -2
- paddlex/modules/ts_classification/dataset_checker/dataset_src/split_dataset.py +1 -1
- paddlex/modules/ts_classification/evaluator.py +2 -2
- paddlex/modules/ts_forecast/evaluator.py +2 -2
- paddlex/modules/video_classification/evaluator.py +2 -2
- paddlex/modules/video_detection/evaluator.py +2 -2
- paddlex/ops/__init__.py +8 -5
- paddlex/paddlex_cli.py +19 -13
- paddlex/repo_apis/Paddle3D_api/bev_fusion/model.py +2 -2
- paddlex/repo_apis/PaddleClas_api/cls/config.py +1 -1
- paddlex/repo_apis/PaddleClas_api/cls/model.py +1 -1
- paddlex/repo_apis/PaddleClas_api/cls/register.py +10 -0
- paddlex/repo_apis/PaddleClas_api/cls/runner.py +1 -1
- paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py +1 -1
- paddlex/repo_apis/PaddleDetection_api/instance_seg/runner.py +1 -1
- paddlex/repo_apis/PaddleDetection_api/object_det/config.py +1 -1
- paddlex/repo_apis/PaddleDetection_api/object_det/model.py +1 -1
- paddlex/repo_apis/PaddleDetection_api/object_det/official_categories.py +25 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/register.py +30 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/runner.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/formula_rec/config.py +3 -3
- paddlex/repo_apis/PaddleOCR_api/formula_rec/model.py +5 -9
- paddlex/repo_apis/PaddleOCR_api/formula_rec/register.py +27 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/runner.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/table_rec/model.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/table_rec/runner.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/text_det/model.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/text_det/register.py +18 -0
- paddlex/repo_apis/PaddleOCR_api/text_det/runner.py +1 -1
- paddlex/repo_apis/PaddleOCR_api/text_rec/config.py +3 -3
- paddlex/repo_apis/PaddleOCR_api/text_rec/model.py +5 -9
- paddlex/repo_apis/PaddleOCR_api/text_rec/register.py +18 -0
- paddlex/repo_apis/PaddleOCR_api/text_rec/runner.py +1 -1
- paddlex/repo_apis/PaddleSeg_api/seg/model.py +1 -1
- paddlex/repo_apis/PaddleSeg_api/seg/runner.py +1 -1
- paddlex/repo_apis/PaddleTS_api/ts_ad/config.py +3 -3
- paddlex/repo_apis/PaddleTS_api/ts_cls/config.py +2 -2
- paddlex/repo_apis/PaddleTS_api/ts_fc/config.py +4 -4
- paddlex/repo_apis/PaddleVideo_api/video_cls/config.py +1 -1
- paddlex/repo_apis/PaddleVideo_api/video_cls/model.py +1 -1
- paddlex/repo_apis/PaddleVideo_api/video_cls/runner.py +1 -1
- paddlex/repo_apis/PaddleVideo_api/video_det/config.py +1 -1
- paddlex/repo_apis/PaddleVideo_api/video_det/model.py +1 -1
- paddlex/repo_apis/PaddleVideo_api/video_det/runner.py +1 -1
- paddlex/repo_apis/base/config.py +1 -1
- paddlex/repo_manager/core.py +3 -3
- paddlex/repo_manager/meta.py +6 -2
- paddlex/repo_manager/repo.py +17 -16
- paddlex/utils/custom_device_list.py +26 -2
- paddlex/utils/deps.py +3 -3
- paddlex/utils/device.py +5 -13
- paddlex/utils/env.py +4 -0
- paddlex/utils/flags.py +11 -4
- paddlex/utils/fonts/__init__.py +34 -4
- paddlex/utils/misc.py +1 -1
- paddlex/utils/subclass_register.py +2 -2
- {paddlex-3.0.0rc1.dist-info → paddlex-3.0.2.dist-info}/METADATA +349 -208
- {paddlex-3.0.0rc1.dist-info → paddlex-3.0.2.dist-info}/RECORD +240 -211
- {paddlex-3.0.0rc1.dist-info → paddlex-3.0.2.dist-info}/WHEEL +1 -1
- {paddlex-3.0.0rc1.dist-info → paddlex-3.0.2.dist-info}/entry_points.txt +1 -0
- {paddlex-3.0.0rc1.dist-info/licenses → paddlex-3.0.2.dist-info}/LICENSE +0 -0
- {paddlex-3.0.0rc1.dist-info → paddlex-3.0.2.dist-info}/top_level.txt +0 -0
paddlex/inference/models/doc_vlm/processors/qwen2_5_vl.py (new file)
@@ -0,0 +1,548 @@

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import Dict, List, Optional, Union

import numpy as np

from .....utils import logging
from ....utils.benchmark import benchmark
from ...common.tokenizer.tokenizer_utils_base import (
    PreTokenizedInput,
    TensorType,
    TextInput,
    TruncationStrategy,
)
from ...common.vision.funcs import resize
from .common import (
    BatchFeature,
    ChannelDimension,
    ImageInput,
    PaddingStrategy,
    PILImageResampling,
    convert_to_rgb,
    fetch_image,
    get_image_size,
    infer_channel_dimension_format,
    make_batched_images,
    make_list_of_images,
    smart_resize,
    to_channel_dimension_format,
    to_numpy_array,
    valid_images,
)

OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]

IMAGE_FACTOR = 28
MIN_PIXELS = 4 * 28 * 28
MAX_PIXELS = 16384 * 28 * 28
MAX_RATIO = 200

__all__ = [
    "Qwen2_5_VLProcessor",
    "Qwen2_5_VLImageProcessor",
    "PPDocBee2Processor",
]


def is_scaled_image(image: np.ndarray) -> bool:
    """
    Checks to see whether the pixel values have already been rescaled to [0, 1].
    """
    if image.dtype == np.uint8:
        return False

    return np.min(image) >= 0 and np.max(image) <= 1


class Qwen2_5_VLProcessor(object):
    """
    Constructs a Qwen2.5-VL processor which wraps a Qwen2.5-VL image processor and a Qwen2 tokenizer into a single processor.
    [`Qwen2_5_VLProcessor`] offers all the functionalities of [`Qwen2_5_VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
    [`~Qwen2_5_VLProcessor.__call__`] and [`~Qwen2_5_VLProcessor.decode`] for more information.
    Args:
        image_processor ([`Qwen2_5_VLImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`Qwen2TokenizerFast`], *optional*):
            The tokenizer is a required input.
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
    """

    def __init__(self, image_processor, tokenizer, **kwargs):
        self.image_processor = image_processor
        self.tokenizer = tokenizer
        self.image_processor.min_pixels = kwargs.get("min_pixels", 3136)
        self.image_processor.max_pixels = kwargs.get("max_pixels", 12845056)

    def preprocess(
        self,
        images: ImageInput = None,
        text: Union[
            TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]
        ] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: int = None,
        return_tensors: Optional[Union[str, TensorType]] = TensorType.PADDLE,
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwrags` arguments to
        Qwen2_5_VLImageProcessor's [`~Qwen2_5_VLImageProcessor.__call__`] if `vision_infos` is not `None`.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
        """
        if images is not None:
            image_inputs = self.image_processor(
                images=images, return_tensors=return_tensors
            )
            image_grid_thw = image_inputs["image_grid_thw"]
        else:
            image_inputs = {}
            image_grid_thw = None

        if not isinstance(text, list):
            text = [text]
        if image_grid_thw is not None:
            merge_length = self.image_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                while "<|image_pad|>" in text[i]:
                    text[i] = text[i].replace(
                        "<|image_pad|>",
                        "<|placeholder|>"
                        * int(image_grid_thw[index].prod() // merge_length),
                        1,
                    )
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", "<|image_pad|>")

        text_inputs = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
        )

        return BatchFeature(data={**text_inputs, **image_inputs})

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)


class Qwen2_5_VLImageProcessor(object):
    """
    Constructs a Qwen2.5-VL image processor that dynamically resizes images based on the original images.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use when resizing the image.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image.
        image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
        image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
        min_pixels (`int`, *optional*, defaults to `56 * 56`):
            The min pixels of the image to resize the image.
        max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`):
            The max pixels of the image to resize the image.
        patch_size (`int`, *optional*, defaults to 14):
            The spatial patch size of the vision encoder.
        temporal_patch_size (`int`, *optional*, defaults to 2):
            The temporal patch size of the vision encoder.
        merge_size (`int`, *optional*, defaults to 2):
            The merge size of the vision encoder to llm encoder.
    """

    model_input_names = ["pixel_values", "image_grid_thw", "second_per_grid_ts"]

    def __init__(
        self,
        do_resize: bool = True,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        min_pixels: int = 56 * 56,
        max_pixels: int = 28 * 28 * 1280,
        patch_size: int = 14,
        temporal_patch_size: int = 2,
        merge_size: int = 2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_resize = do_resize
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        image_mean_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        image_std_ = image_std if image_std is not None else OPENAI_CLIP_STD
        self.min_pixels = min_pixels
        self.max_pixels = max_pixels
        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.merge_size = merge_size
        self.size = {"min_pixels": min_pixels, "max_pixels": max_pixels}
        self.do_convert_rgb = do_convert_rgb

        self.image_mean = np.array(image_mean_)[None, None, ...]
        self.image_std = np.array(image_std_)[None, None, ...]

    def _preprocess(
        self,
        images: Union[ImageInput],
        do_resize: bool = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """
        Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.

        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
            vision_info (`List[Dict]`, *optional*):
                Optional list of dictionaries containing additional information about vision inputs.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Scale factor to use if rescaling the image.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        images = make_list_of_images(images)

        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logging.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        height, width = get_image_size(images[0], channel_dim=input_data_format)
        resized_height, resized_width = height, width
        processed_images = []

        for image in images:

            if do_resize:
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=self.patch_size * self.merge_size,
                    min_pixels=self.min_pixels,
                    max_pixels=self.max_pixels,
                    max_ratio=MAX_RATIO,
                )
                image = image.astype("uint8")
                image = resize(
                    image,
                    (resized_width, resized_height),
                    interp=None,
                    backend="cv2",
                )

            if do_rescale:
                image = image.astype("float32")
                image *= rescale_factor

            if do_normalize:
                assert input_data_format == ChannelDimension.LAST
                image = (image - self.image_mean) / self.image_std

            image = to_channel_dimension_format(
                image, data_format, input_channel_dim=input_data_format
            )
            processed_images.append(image)

        patches = np.array(processed_images)
        if data_format == ChannelDimension.LAST:
            patches = patches.transpose([0, 3, 1, 2])
        if patches.shape[0] == 1:
            patches = np.tile(patches, (self.temporal_patch_size, 1, 1, 1))
        channel = patches.shape[1]
        grid_t = patches.shape[0] // self.temporal_patch_size
        grid_h, grid_w = (
            resized_height // self.patch_size,
            resized_width // self.patch_size,
        )
        patches = patches.reshape(
            [
                grid_t,
                self.temporal_patch_size,
                channel,
                grid_h // self.merge_size,
                self.merge_size,
                self.patch_size,
                grid_w // self.merge_size,
                self.merge_size,
                self.patch_size,
            ]
        )
        patches = patches.transpose([0, 3, 6, 4, 7, 2, 1, 5, 8])
        flatten_patches = patches.reshape(
            [
                grid_t * grid_h * grid_w,
                channel * self.temporal_patch_size * self.patch_size * self.patch_size,
            ]
        )

        return flatten_patches, (grid_t, grid_h, grid_w)

    def __call__(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """
        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
                the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PADDLE` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = (
            rescale_factor if rescale_factor is not None else self.rescale_factor
        )
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = (
            do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        )

        if images is not None:
            images = make_batched_images(images)

        if images is not None and not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "paddle.Tensor."
            )

        if images is not None:
            pixel_values, vision_grid_thws = [], []
            for image in images:
                patches, image_grid_thw = self._preprocess(
                    image,
                    do_resize=do_resize,
                    resample=resample,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                    do_convert_rgb=do_convert_rgb,
                    input_data_format=input_data_format,
                )
                pixel_values.extend(patches)
                vision_grid_thws.append(image_grid_thw)
            pixel_values = np.array(pixel_values)
            vision_grid_thws = np.array(vision_grid_thws)
            data = {"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws}

        return BatchFeature(data=data, tensor_type=return_tensors)


class PPDocBee2Processor(Qwen2_5_VLProcessor):
    """
    PP-DocBee processor, based on Qwen2VLProcessor
    """

    @benchmark.timeit
    def preprocess(self, input_dicts: List[Dict]):
        """
        PreProcess for PP-DocBee2 Series
        """
        assert (isinstance(input_dict, dict) for input_dict in input_dicts)

        prompt = (
            "<|im_start|>system\n"
            "You are a helpful assistant.<|im_end|>\n"
            "<|im_start|>user\n"
            "<|vision_start|><|image_pad|><|vision_end|>{query}<|im_end|>\n"
            "<|im_start|>assistant\n"
        )
        query_inputs = [
            prompt.format(query=input_dict["query"]) for input_dict in input_dicts
        ]
        image_inputs = [
            fetch_image(
                input_dict["image"],
                size_factor=IMAGE_FACTOR,
                min_pixels=MIN_PIXELS,
                max_pixels=MAX_PIXELS,
                max_ratio=MAX_RATIO,
            )
            for input_dict in input_dicts
        ]

        rst_inputs = super().preprocess(
            text=query_inputs,
            images=image_inputs,
            padding=True,
            return_tensors="pd",
        )

        return rst_inputs

    @benchmark.timeit
    def postprocess(self, model_pred, *args, **kwargs) -> List[str]:
        """
        Post process adapt for PaddleX
        """
        return self.tokenizer.batch_decode(
            model_pred[0], skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
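
For readers checking the token accounting in the new processor, the sketch below (not part of the package diff) reproduces the grid arithmetic that `Qwen2_5_VLImageProcessor._preprocess` and `Qwen2_5_VLProcessor.preprocess` perform. The `approx_smart_resize` helper is a hypothetical stand-in for the `smart_resize` imported from `.common`, written from the usual Qwen2-VL rounding rules, so its exact clamping behaviour is an assumption; the grid and placeholder counts follow the reshapes shown above.

```python
import math

# Hypothetical stand-in for the smart_resize helper imported from .common:
# round each side to a multiple of `factor`, keeping the total area within bounds.
def approx_smart_resize(height, width, factor=28,
                        min_pixels=4 * 28 * 28, max_pixels=16384 * 28 * 28):
    h_bar = max(factor, round(height / factor) * factor)
    w_bar = max(factor, round(width / factor) * factor)
    if h_bar * w_bar > max_pixels:
        beta = math.sqrt((height * width) / max_pixels)
        h_bar = math.floor(height / beta / factor) * factor
        w_bar = math.floor(width / beta / factor) * factor
    elif h_bar * w_bar < min_pixels:
        beta = math.sqrt(min_pixels / (height * width))
        h_bar = math.ceil(height * beta / factor) * factor
        w_bar = math.ceil(width * beta / factor) * factor
    return h_bar, w_bar


patch_size, merge_size, temporal_patch_size = 14, 2, 2  # defaults in the new file

# One 750x720 document image.
resized_h, resized_w = approx_smart_resize(750, 720, factor=patch_size * merge_size)

grid_t = 1                         # a single image is tiled to temporal_patch_size frames
grid_h = resized_h // patch_size   # 756 // 14 = 54
grid_w = resized_w // patch_size   # 728 // 14 = 52

# Shape of the flattened patches returned by _preprocess ...
pixel_values_shape = (
    grid_t * grid_h * grid_w,
    3 * temporal_patch_size * patch_size * patch_size,
)
# ... and the number of <|image_pad|> placeholders preprocess() inserts per image.
num_image_tokens = (grid_t * grid_h * grid_w) // merge_size**2

print(resized_h, resized_w)     # 756 728
print(pixel_values_shape)       # (2808, 1176)
print(num_image_tokens)         # 702
```

With the file's defaults, a 750x720 image would resize to 756x728, giving a (2808, 1176) `pixel_values` array and 702 image tokens after the `image_grid_thw.prod() // merge_size**2` expansion.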