paddlex-2.0.0rc4-py3-none-any.whl → paddlex-3.0.0-py3-none-any.whl
This diff compares the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
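The file listing below shows that 3.0.0 ships an entirely new config-driven inference stack (`paddlex/configs/pipelines/`, `paddlex/inference/pipelines/`) alongside per-model module configs. For orientation, here is a minimal sketch of the documented PaddleX 3.0 `create_pipeline` entry point, using the OCR pipeline whose config (`paddlex/configs/pipelines/OCR.yaml`) appears in this diff; the input image path is a hypothetical placeholder:

```python
from paddlex import create_pipeline

# Instantiate a pipeline by name; PaddleX resolves the bundled config
# (paddlex/configs/pipelines/OCR.yaml) and downloads official weights on first use.
pipeline = create_pipeline(pipeline="OCR")

# predict() accepts a local path, URL, or array and yields one result per input.
for res in pipeline.predict("sample.png"):  # hypothetical input image
    res.print()                    # log the structured prediction
    res.save_to_img("./output/")   # save the visualization
    res.save_to_json("./output/")  # save the raw prediction data
```

The same pattern applies to the other pipeline configs listed below (e.g. `PP-StructureV3.yaml`, `table_recognition.yaml`) by swapping the `pipeline` name.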
- paddlex/.version +1 -0
- paddlex/__init__.py +35 -18
- paddlex/__main__.py +39 -0
- paddlex/configs/modules/3d_bev_detection/BEVFusion.yaml +38 -0
- paddlex/configs/modules/chart_parsing/PP-Chart2Table.yaml +13 -0
- paddlex/configs/modules/doc_text_orientation/PP-LCNet_x1_0_doc_ori.yaml +41 -0
- paddlex/configs/modules/doc_vlm/PP-DocBee-2B.yaml +14 -0
- paddlex/configs/modules/doc_vlm/PP-DocBee-7B.yaml +14 -0
- paddlex/configs/modules/doc_vlm/PP-DocBee2-3B.yaml +14 -0
- paddlex/configs/modules/face_detection/BlazeFace-FPN-SSH.yaml +40 -0
- paddlex/configs/modules/face_detection/BlazeFace.yaml +40 -0
- paddlex/configs/modules/face_detection/PP-YOLOE_plus-S_face.yaml +40 -0
- paddlex/configs/modules/face_detection/PicoDet_LCNet_x2_5_face.yaml +40 -0
- paddlex/configs/modules/face_feature/MobileFaceNet.yaml +41 -0
- paddlex/configs/modules/face_feature/ResNet50_face.yaml +41 -0
- paddlex/configs/modules/formula_recognition/LaTeX_OCR_rec.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet-L.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet-S.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet_plus-L.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet_plus-M.yaml +40 -0
- paddlex/configs/modules/formula_recognition/PP-FormulaNet_plus-S.yaml +40 -0
- paddlex/configs/modules/formula_recognition/UniMERNet.yaml +40 -0
- paddlex/configs/modules/human_detection/PP-YOLOE-L_human.yaml +42 -0
- paddlex/configs/modules/human_detection/PP-YOLOE-S_human.yaml +42 -0
- paddlex/configs/modules/image_anomaly_detection/STFPM.yaml +41 -0
- paddlex/configs/modules/image_classification/CLIP_vit_base_patch16_224.yaml +41 -0
- paddlex/configs/modules/image_classification/CLIP_vit_large_patch14_224.yaml +41 -0
- paddlex/configs/modules/image_classification/ConvNeXt_base_224.yaml +41 -0
- paddlex/configs/modules/image_classification/ConvNeXt_base_384.yaml +41 -0
- paddlex/configs/modules/image_classification/ConvNeXt_large_224.yaml +41 -0
- paddlex/configs/modules/image_classification/ConvNeXt_large_384.yaml +41 -0
- paddlex/configs/modules/image_classification/ConvNeXt_small.yaml +41 -0
- paddlex/configs/modules/image_classification/ConvNeXt_tiny.yaml +41 -0
- paddlex/configs/modules/image_classification/FasterNet-L.yaml +40 -0
- paddlex/configs/modules/image_classification/FasterNet-M.yaml +40 -0
- paddlex/configs/modules/image_classification/FasterNet-S.yaml +40 -0
- paddlex/configs/modules/image_classification/FasterNet-T0.yaml +40 -0
- paddlex/configs/modules/image_classification/FasterNet-T1.yaml +40 -0
- paddlex/configs/modules/image_classification/FasterNet-T2.yaml +40 -0
- paddlex/configs/modules/image_classification/MobileNetV1_x0_25.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV1_x0_5.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV1_x0_75.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV1_x1_0.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV2_x0_25.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV2_x0_5.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV2_x1_0.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV2_x1_5.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV2_x2_0.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV3_large_x0_35.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV3_large_x0_5.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV3_large_x0_75.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV3_large_x1_0.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV3_large_x1_25.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV3_small_x0_35.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV3_small_x0_5.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV3_small_x0_75.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV3_small_x1_0.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV3_small_x1_25.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV4_conv_large.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV4_conv_medium.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV4_conv_small.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV4_hybrid_large.yaml +41 -0
- paddlex/configs/modules/image_classification/MobileNetV4_hybrid_medium.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-HGNetV2-B0.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-HGNetV2-B1.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-HGNetV2-B2.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-HGNetV2-B3.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-HGNetV2-B4.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-HGNetV2-B5.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-HGNetV2-B6.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-HGNet_base.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-HGNet_small.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-HGNet_tiny.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNetV2_base.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNetV2_large.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNetV2_small.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNet_x0_25.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNet_x0_35.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNet_x0_5.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNet_x0_75.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNet_x1_0.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNet_x1_5.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNet_x2_0.yaml +41 -0
- paddlex/configs/modules/image_classification/PP-LCNet_x2_5.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet101.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet101_vd.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet152.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet152_vd.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet18.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet18_vd.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet200_vd.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet34.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet34_vd.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet50.yaml +41 -0
- paddlex/configs/modules/image_classification/ResNet50_vd.yaml +41 -0
- paddlex/configs/modules/image_classification/StarNet-S1.yaml +41 -0
- paddlex/configs/modules/image_classification/StarNet-S2.yaml +41 -0
- paddlex/configs/modules/image_classification/StarNet-S3.yaml +41 -0
- paddlex/configs/modules/image_classification/StarNet-S4.yaml +41 -0
- paddlex/configs/modules/image_classification/SwinTransformer_base_patch4_window12_384.yaml +41 -0
- paddlex/configs/modules/image_classification/SwinTransformer_base_patch4_window7_224.yaml +41 -0
- paddlex/configs/modules/image_classification/SwinTransformer_large_patch4_window12_384.yaml +41 -0
- paddlex/configs/modules/image_classification/SwinTransformer_large_patch4_window7_224.yaml +41 -0
- paddlex/configs/modules/image_classification/SwinTransformer_small_patch4_window7_224.yaml +41 -0
- paddlex/configs/modules/image_classification/SwinTransformer_tiny_patch4_window7_224.yaml +41 -0
- paddlex/configs/modules/image_feature/PP-ShiTuV2_rec.yaml +42 -0
- paddlex/configs/modules/image_feature/PP-ShiTuV2_rec_CLIP_vit_base.yaml +42 -0
- paddlex/configs/modules/image_feature/PP-ShiTuV2_rec_CLIP_vit_large.yaml +41 -0
- paddlex/configs/modules/image_multilabel_classification/CLIP_vit_base_patch16_448_ML.yaml +41 -0
- paddlex/configs/modules/image_multilabel_classification/PP-HGNetV2-B0_ML.yaml +41 -0
- paddlex/configs/modules/image_multilabel_classification/PP-HGNetV2-B4_ML.yaml +41 -0
- paddlex/configs/modules/image_multilabel_classification/PP-HGNetV2-B6_ML.yaml +41 -0
- paddlex/configs/modules/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml +41 -0
- paddlex/configs/modules/image_multilabel_classification/ResNet50_ML.yaml +41 -0
- paddlex/configs/modules/image_unwarping/UVDoc.yaml +12 -0
- paddlex/configs/modules/instance_segmentation/Cascade-MaskRCNN-ResNet50-FPN.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/Cascade-MaskRCNN-ResNet50-vd-SSLDv2-FPN.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/Mask-RT-DETR-H.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/Mask-RT-DETR-L.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/Mask-RT-DETR-M.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/Mask-RT-DETR-S.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/Mask-RT-DETR-X.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/MaskRCNN-ResNeXt101-vd-FPN.yaml +39 -0
- paddlex/configs/modules/instance_segmentation/MaskRCNN-ResNet101-FPN.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/MaskRCNN-ResNet101-vd-FPN.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/MaskRCNN-ResNet50-FPN.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/MaskRCNN-ResNet50-vd-FPN.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/MaskRCNN-ResNet50.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/PP-YOLOE_seg-S.yaml +40 -0
- paddlex/configs/modules/instance_segmentation/SOLOv2.yaml +40 -0
- paddlex/configs/modules/keypoint_detection/PP-TinyPose_128x96.yaml +40 -0
- paddlex/configs/modules/keypoint_detection/PP-TinyPose_256x192.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocBlockLayout.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocLayout-L.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocLayout-M.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocLayout-S.yaml +40 -0
- paddlex/configs/modules/layout_detection/PP-DocLayout_plus-L.yaml +40 -0
- paddlex/configs/modules/layout_detection/PicoDet-L_layout_17cls.yaml +40 -0
- paddlex/configs/modules/layout_detection/PicoDet-L_layout_3cls.yaml +40 -0
- paddlex/configs/modules/layout_detection/PicoDet-S_layout_17cls.yaml +40 -0
- paddlex/configs/modules/layout_detection/PicoDet-S_layout_3cls.yaml +40 -0
- paddlex/configs/modules/layout_detection/PicoDet_layout_1x.yaml +40 -0
- paddlex/configs/modules/layout_detection/PicoDet_layout_1x_table.yaml +40 -0
- paddlex/configs/modules/layout_detection/RT-DETR-H_layout_17cls.yaml +40 -0
- paddlex/configs/modules/layout_detection/RT-DETR-H_layout_3cls.yaml +40 -0
- paddlex/configs/modules/mainbody_detection/PP-ShiTuV2_det.yaml +41 -0
- paddlex/configs/modules/multilingual_speech_recognition/whisper_base.yaml +12 -0
- paddlex/configs/modules/multilingual_speech_recognition/whisper_large.yaml +12 -0
- paddlex/configs/modules/multilingual_speech_recognition/whisper_medium.yaml +12 -0
- paddlex/configs/modules/multilingual_speech_recognition/whisper_small.yaml +12 -0
- paddlex/configs/modules/multilingual_speech_recognition/whisper_tiny.yaml +12 -0
- paddlex/configs/modules/object_detection/Cascade-FasterRCNN-ResNet50-FPN.yaml +41 -0
- paddlex/configs/modules/object_detection/Cascade-FasterRCNN-ResNet50-vd-SSLDv2-FPN.yaml +42 -0
- paddlex/configs/modules/object_detection/CenterNet-DLA-34.yaml +41 -0
- paddlex/configs/modules/object_detection/CenterNet-ResNet50.yaml +41 -0
- paddlex/configs/modules/object_detection/Co-DINO-R50.yaml +40 -0
- paddlex/configs/modules/object_detection/Co-DINO-Swin-L.yaml +40 -0
- paddlex/configs/modules/object_detection/Co-Deformable-DETR-R50.yaml +40 -0
- paddlex/configs/modules/object_detection/Co-Deformable-DETR-Swin-T.yaml +40 -0
- paddlex/configs/modules/object_detection/DETR-R50.yaml +42 -0
- paddlex/configs/modules/object_detection/FCOS-ResNet50.yaml +41 -0
- paddlex/configs/modules/object_detection/FasterRCNN-ResNeXt101-vd-FPN.yaml +42 -0
- paddlex/configs/modules/object_detection/FasterRCNN-ResNet101-FPN.yaml +42 -0
- paddlex/configs/modules/object_detection/FasterRCNN-ResNet101.yaml +42 -0
- paddlex/configs/modules/object_detection/FasterRCNN-ResNet34-FPN.yaml +42 -0
- paddlex/configs/modules/object_detection/FasterRCNN-ResNet50-FPN.yaml +42 -0
- paddlex/configs/modules/object_detection/FasterRCNN-ResNet50-vd-FPN.yaml +42 -0
- paddlex/configs/modules/object_detection/FasterRCNN-ResNet50-vd-SSLDv2-FPN.yaml +42 -0
- paddlex/configs/modules/object_detection/FasterRCNN-ResNet50.yaml +42 -0
- paddlex/configs/modules/object_detection/FasterRCNN-Swin-Tiny-FPN.yaml +42 -0
- paddlex/configs/modules/object_detection/PP-YOLOE_plus-L.yaml +40 -0
- paddlex/configs/modules/object_detection/PP-YOLOE_plus-M.yaml +40 -0
- paddlex/configs/modules/object_detection/PP-YOLOE_plus-S.yaml +40 -0
- paddlex/configs/modules/object_detection/PP-YOLOE_plus-X.yaml +40 -0
- paddlex/configs/modules/object_detection/PicoDet-L.yaml +40 -0
- paddlex/configs/modules/object_detection/PicoDet-M.yaml +42 -0
- paddlex/configs/modules/object_detection/PicoDet-S.yaml +40 -0
- paddlex/configs/modules/object_detection/PicoDet-XS.yaml +42 -0
- paddlex/configs/modules/object_detection/RT-DETR-H.yaml +40 -0
- paddlex/configs/modules/object_detection/RT-DETR-L.yaml +40 -0
- paddlex/configs/modules/object_detection/RT-DETR-R18.yaml +40 -0
- paddlex/configs/modules/object_detection/RT-DETR-R50.yaml +40 -0
- paddlex/configs/modules/object_detection/RT-DETR-X.yaml +40 -0
- paddlex/configs/modules/object_detection/YOLOX-L.yaml +40 -0
- paddlex/configs/modules/object_detection/YOLOX-M.yaml +40 -0
- paddlex/configs/modules/object_detection/YOLOX-N.yaml +40 -0
- paddlex/configs/modules/object_detection/YOLOX-S.yaml +40 -0
- paddlex/configs/modules/object_detection/YOLOX-T.yaml +40 -0
- paddlex/configs/modules/object_detection/YOLOX-X.yaml +40 -0
- paddlex/configs/modules/object_detection/YOLOv3-DarkNet53.yaml +40 -0
- paddlex/configs/modules/object_detection/YOLOv3-MobileNetV3.yaml +40 -0
- paddlex/configs/modules/object_detection/YOLOv3-ResNet50_vd_DCN.yaml +40 -0
- paddlex/configs/modules/open_vocabulary_detection/GroundingDINO-T.yaml +13 -0
- paddlex/configs/modules/open_vocabulary_detection/YOLO-Worldv2-L.yaml +13 -0
- paddlex/configs/modules/open_vocabulary_segmentation/SAM-H_box.yaml +17 -0
- paddlex/configs/modules/open_vocabulary_segmentation/SAM-H_point.yaml +15 -0
- paddlex/configs/modules/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml +41 -0
- paddlex/configs/modules/rotated_object_detection/PP-YOLOE-R-L.yaml +40 -0
- paddlex/configs/modules/seal_text_detection/PP-OCRv4_mobile_seal_det.yaml +40 -0
- paddlex/configs/modules/seal_text_detection/PP-OCRv4_server_seal_det.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/Deeplabv3-R101.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/Deeplabv3-R50.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/Deeplabv3_Plus-R101.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/Deeplabv3_Plus-R50.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/MaskFormer_small.yaml +42 -0
- paddlex/configs/modules/semantic_segmentation/MaskFormer_tiny.yaml +42 -0
- paddlex/configs/modules/semantic_segmentation/OCRNet_HRNet-W18.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/OCRNet_HRNet-W48.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/PP-LiteSeg-B.yaml +41 -0
- paddlex/configs/modules/semantic_segmentation/PP-LiteSeg-T.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SeaFormer_base.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SeaFormer_large.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SeaFormer_small.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SeaFormer_tiny.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SegFormer-B0.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SegFormer-B1.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SegFormer-B2.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SegFormer-B3.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SegFormer-B4.yaml +40 -0
- paddlex/configs/modules/semantic_segmentation/SegFormer-B5.yaml +40 -0
- paddlex/configs/modules/small_object_detection/PP-YOLOE_plus_SOD-L.yaml +42 -0
- paddlex/configs/modules/small_object_detection/PP-YOLOE_plus_SOD-S.yaml +42 -0
- paddlex/configs/modules/small_object_detection/PP-YOLOE_plus_SOD-largesize-L.yaml +42 -0
- paddlex/configs/modules/table_cells_detection/RT-DETR-L_wired_table_cell_det.yaml +40 -0
- paddlex/configs/modules/table_cells_detection/RT-DETR-L_wireless_table_cell_det.yaml +40 -0
- paddlex/configs/modules/table_classification/PP-LCNet_x1_0_table_cls.yaml +41 -0
- paddlex/configs/modules/table_structure_recognition/SLANeXt_wired.yaml +39 -0
- paddlex/configs/modules/table_structure_recognition/SLANeXt_wireless.yaml +39 -0
- paddlex/configs/modules/table_structure_recognition/SLANet.yaml +39 -0
- paddlex/configs/modules/table_structure_recognition/SLANet_plus.yaml +39 -0
- paddlex/configs/modules/text_detection/PP-OCRv3_mobile_det.yaml +40 -0
- paddlex/configs/modules/text_detection/PP-OCRv3_server_det.yaml +40 -0
- paddlex/configs/modules/text_detection/PP-OCRv4_mobile_det.yaml +40 -0
- paddlex/configs/modules/text_detection/PP-OCRv4_server_det.yaml +40 -0
- paddlex/configs/modules/text_detection/PP-OCRv5_mobile_det.yaml +40 -0
- paddlex/configs/modules/text_detection/PP-OCRv5_server_det.yaml +40 -0
- paddlex/configs/modules/text_recognition/PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/PP-OCRv4_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/PP-OCRv4_server_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/PP-OCRv4_server_rec_doc.yaml +39 -0
- paddlex/configs/modules/text_recognition/PP-OCRv5_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/PP-OCRv5_server_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/arabic_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/ch_RepSVTR_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/ch_SVTRv2_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/chinese_cht_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/cyrillic_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/devanagari_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/en_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/en_PP-OCRv4_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/japan_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/ka_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/korean_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/latin_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/ta_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/text_recognition/te_PP-OCRv3_mobile_rec.yaml +39 -0
- paddlex/configs/modules/textline_orientation/PP-LCNet_x0_25_textline_ori.yaml +41 -0
- paddlex/configs/modules/ts_anomaly_detection/AutoEncoder_ad.yaml +37 -0
- paddlex/configs/modules/ts_anomaly_detection/DLinear_ad.yaml +37 -0
- paddlex/configs/modules/ts_anomaly_detection/Nonstationary_ad.yaml +37 -0
- paddlex/configs/modules/ts_anomaly_detection/PatchTST_ad.yaml +37 -0
- paddlex/configs/modules/ts_anomaly_detection/TimesNet_ad.yaml +37 -0
- paddlex/configs/modules/ts_classification/TimesNet_cls.yaml +37 -0
- paddlex/configs/modules/ts_forecast/DLinear.yaml +38 -0
- paddlex/configs/modules/ts_forecast/NLinear.yaml +38 -0
- paddlex/configs/modules/ts_forecast/Nonstationary.yaml +38 -0
- paddlex/configs/modules/ts_forecast/PatchTST.yaml +38 -0
- paddlex/configs/modules/ts_forecast/RLinear.yaml +38 -0
- paddlex/configs/modules/ts_forecast/TiDE.yaml +38 -0
- paddlex/configs/modules/ts_forecast/TimesNet.yaml +38 -0
- paddlex/configs/modules/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml +41 -0
- paddlex/configs/modules/vehicle_detection/PP-YOLOE-L_vehicle.yaml +41 -0
- paddlex/configs/modules/vehicle_detection/PP-YOLOE-S_vehicle.yaml +42 -0
- paddlex/configs/modules/video_classification/PP-TSM-R50_8frames_uniform.yaml +42 -0
- paddlex/configs/modules/video_classification/PP-TSMv2-LCNetV2_16frames_uniform.yaml +42 -0
- paddlex/configs/modules/video_classification/PP-TSMv2-LCNetV2_8frames_uniform.yaml +42 -0
- paddlex/configs/modules/video_detection/YOWO.yaml +40 -0
- paddlex/configs/pipelines/3d_bev_detection.yaml +9 -0
- paddlex/configs/pipelines/OCR.yaml +45 -0
- paddlex/configs/pipelines/PP-ChatOCRv3-doc.yaml +151 -0
- paddlex/configs/pipelines/PP-ChatOCRv4-doc.yaml +237 -0
- paddlex/configs/pipelines/PP-ShiTuV2.yaml +18 -0
- paddlex/configs/pipelines/PP-StructureV3.yaml +226 -0
- paddlex/configs/pipelines/anomaly_detection.yaml +8 -0
- paddlex/configs/pipelines/doc_preprocessor.yaml +15 -0
- paddlex/configs/pipelines/doc_understanding.yaml +9 -0
- paddlex/configs/pipelines/face_recognition.yaml +18 -0
- paddlex/configs/pipelines/formula_recognition.yaml +39 -0
- paddlex/configs/pipelines/human_keypoint_detection.yaml +17 -0
- paddlex/configs/pipelines/image_classification.yaml +10 -0
- paddlex/configs/pipelines/image_multilabel_classification.yaml +9 -0
- paddlex/configs/pipelines/instance_segmentation.yaml +10 -0
- paddlex/configs/pipelines/layout_parsing.yaml +102 -0
- paddlex/configs/pipelines/multilingual_speech_recognition.yaml +9 -0
- paddlex/configs/pipelines/object_detection.yaml +10 -0
- paddlex/configs/pipelines/open_vocabulary_detection.yaml +12 -0
- paddlex/configs/pipelines/open_vocabulary_segmentation.yaml +13 -0
- paddlex/configs/pipelines/pedestrian_attribute_recognition.yaml +15 -0
- paddlex/configs/pipelines/rotated_object_detection.yaml +10 -0
- paddlex/configs/pipelines/seal_recognition.yaml +52 -0
- paddlex/configs/pipelines/semantic_segmentation.yaml +10 -0
- paddlex/configs/pipelines/small_object_detection.yaml +10 -0
- paddlex/configs/pipelines/table_recognition.yaml +57 -0
- paddlex/configs/pipelines/table_recognition_v2.yaml +82 -0
- paddlex/configs/pipelines/ts_anomaly_detection.yaml +8 -0
- paddlex/configs/pipelines/ts_classification.yaml +8 -0
- paddlex/configs/pipelines/ts_forecast.yaml +8 -0
- paddlex/configs/pipelines/vehicle_attribute_recognition.yaml +15 -0
- paddlex/configs/pipelines/video_classification.yaml +9 -0
- paddlex/configs/pipelines/video_detection.yaml +10 -0
- paddlex/constants.py +17 -0
- paddlex/engine.py +56 -0
- paddlex/hpip_links.html +31 -0
- paddlex/inference/__init__.py +19 -0
- paddlex/inference/common/__init__.py +13 -0
- paddlex/inference/common/batch_sampler/__init__.py +21 -0
- paddlex/inference/common/batch_sampler/audio_batch_sampler.py +83 -0
- paddlex/inference/common/batch_sampler/base_batch_sampler.py +94 -0
- paddlex/inference/common/batch_sampler/det_3d_batch_sampler.py +144 -0
- paddlex/inference/common/batch_sampler/doc_vlm_batch_sampler.py +87 -0
- paddlex/inference/common/batch_sampler/image_batch_sampler.py +121 -0
- paddlex/inference/common/batch_sampler/ts_batch_sampler.py +109 -0
- paddlex/inference/common/batch_sampler/video_batch_sampler.py +74 -0
- paddlex/inference/common/reader/__init__.py +19 -0
- paddlex/inference/common/reader/audio_reader.py +46 -0
- paddlex/inference/common/reader/det_3d_reader.py +241 -0
- paddlex/inference/common/reader/image_reader.py +73 -0
- paddlex/inference/common/reader/ts_reader.py +46 -0
- paddlex/inference/common/reader/video_reader.py +42 -0
- paddlex/inference/common/result/__init__.py +29 -0
- paddlex/inference/common/result/base_cv_result.py +41 -0
- paddlex/inference/common/result/base_result.py +72 -0
- paddlex/inference/common/result/base_ts_result.py +41 -0
- paddlex/inference/common/result/base_video_result.py +36 -0
- paddlex/inference/common/result/mixin.py +709 -0
- paddlex/inference/models/__init__.py +86 -0
- paddlex/inference/models/anomaly_detection/__init__.py +15 -0
- paddlex/inference/models/anomaly_detection/predictor.py +135 -0
- paddlex/inference/models/anomaly_detection/processors.py +53 -0
- paddlex/inference/models/anomaly_detection/result.py +71 -0
- paddlex/inference/models/base/__init__.py +15 -0
- paddlex/inference/models/base/predictor/__init__.py +15 -0
- paddlex/inference/models/base/predictor/base_predictor.py +414 -0
- paddlex/inference/models/common/__init__.py +26 -0
- paddlex/inference/models/common/static_infer.py +801 -0
- paddlex/inference/models/common/tokenizer/__init__.py +21 -0
- paddlex/inference/models/common/tokenizer/bert_tokenizer.py +655 -0
- paddlex/inference/models/common/tokenizer/clip_tokenizer.py +609 -0
- paddlex/inference/models/common/tokenizer/gpt_tokenizer.py +453 -0
- paddlex/inference/models/common/tokenizer/qwen2_5_tokenizer.py +112 -0
- paddlex/inference/models/common/tokenizer/qwen2_tokenizer.py +438 -0
- paddlex/inference/models/common/tokenizer/qwen_tokenizer.py +288 -0
- paddlex/inference/models/common/tokenizer/tokenizer_utils.py +2149 -0
- paddlex/inference/models/common/tokenizer/tokenizer_utils_base.py +3720 -0
- paddlex/inference/models/common/tokenizer/utils.py +66 -0
- paddlex/inference/models/common/tokenizer/vocab.py +647 -0
- paddlex/inference/models/common/ts/__init__.py +15 -0
- paddlex/inference/models/common/ts/funcs.py +540 -0
- paddlex/inference/models/common/ts/processors.py +322 -0
- paddlex/inference/models/common/vision/__init__.py +23 -0
- paddlex/inference/models/common/vision/funcs.py +98 -0
- paddlex/inference/models/common/vision/processors.py +285 -0
- paddlex/inference/models/common/vlm/__init__.py +13 -0
- paddlex/inference/models/common/vlm/activations.py +189 -0
- paddlex/inference/models/common/vlm/bert_padding.py +127 -0
- paddlex/inference/models/common/vlm/conversion_utils.py +99 -0
- paddlex/inference/models/common/vlm/distributed.py +229 -0
- paddlex/inference/models/common/vlm/flash_attn_utils.py +119 -0
- paddlex/inference/models/common/vlm/fusion_ops.py +205 -0
- paddlex/inference/models/common/vlm/generation/__init__.py +34 -0
- paddlex/inference/models/common/vlm/generation/configuration_utils.py +533 -0
- paddlex/inference/models/common/vlm/generation/logits_process.py +730 -0
- paddlex/inference/models/common/vlm/generation/stopping_criteria.py +106 -0
- paddlex/inference/models/common/vlm/generation/utils.py +2162 -0
- paddlex/inference/models/common/vlm/transformers/__init__.py +16 -0
- paddlex/inference/models/common/vlm/transformers/configuration_utils.py +1037 -0
- paddlex/inference/models/common/vlm/transformers/conversion_utils.py +408 -0
- paddlex/inference/models/common/vlm/transformers/model_outputs.py +1612 -0
- paddlex/inference/models/common/vlm/transformers/model_utils.py +2014 -0
- paddlex/inference/models/common/vlm/transformers/utils.py +178 -0
- paddlex/inference/models/common/vlm/utils.py +109 -0
- paddlex/inference/models/doc_vlm/__init__.py +15 -0
- paddlex/inference/models/doc_vlm/modeling/GOT_ocr_2_0.py +830 -0
- paddlex/inference/models/doc_vlm/modeling/__init__.py +17 -0
- paddlex/inference/models/doc_vlm/modeling/qwen2.py +1606 -0
- paddlex/inference/models/doc_vlm/modeling/qwen2_5_vl.py +3006 -0
- paddlex/inference/models/doc_vlm/modeling/qwen2_vl.py +2495 -0
- paddlex/inference/models/doc_vlm/predictor.py +253 -0
- paddlex/inference/models/doc_vlm/processors/GOT_ocr_2_0.py +97 -0
- paddlex/inference/models/doc_vlm/processors/__init__.py +17 -0
- paddlex/inference/models/doc_vlm/processors/common.py +561 -0
- paddlex/inference/models/doc_vlm/processors/qwen2_5_vl.py +548 -0
- paddlex/inference/models/doc_vlm/processors/qwen2_vl.py +543 -0
- paddlex/inference/models/doc_vlm/result.py +21 -0
- paddlex/inference/models/face_feature/__init__.py +15 -0
- paddlex/inference/models/face_feature/predictor.py +66 -0
- paddlex/inference/models/formula_recognition/__init__.py +15 -0
- paddlex/inference/models/formula_recognition/predictor.py +193 -0
- paddlex/inference/models/formula_recognition/processors.py +1015 -0
- paddlex/inference/models/formula_recognition/result.py +411 -0
- paddlex/inference/models/image_classification/__init__.py +15 -0
- paddlex/inference/models/image_classification/predictor.py +172 -0
- paddlex/inference/models/image_classification/processors.py +89 -0
- paddlex/inference/models/image_classification/result.py +93 -0
- paddlex/inference/models/image_feature/__init__.py +15 -0
- paddlex/inference/models/image_feature/predictor.py +146 -0
- paddlex/inference/models/image_feature/processors.py +31 -0
- paddlex/inference/models/image_feature/result.py +32 -0
- paddlex/inference/models/image_multilabel_classification/__init__.py +15 -0
- paddlex/inference/models/image_multilabel_classification/predictor.py +95 -0
- paddlex/inference/models/image_multilabel_classification/processors.py +89 -0
- paddlex/inference/models/image_multilabel_classification/result.py +96 -0
- paddlex/inference/models/image_unwarping/__init__.py +15 -0
- paddlex/inference/models/image_unwarping/predictor.py +97 -0
- paddlex/inference/models/image_unwarping/processors.py +92 -0
- paddlex/inference/models/image_unwarping/result.py +47 -0
- paddlex/inference/models/instance_segmentation/__init__.py +15 -0
- paddlex/inference/models/instance_segmentation/predictor.py +202 -0
- paddlex/inference/models/instance_segmentation/processors.py +102 -0
- paddlex/inference/models/instance_segmentation/result.py +162 -0
- paddlex/inference/models/keypoint_detection/__init__.py +15 -0
- paddlex/inference/models/keypoint_detection/predictor.py +190 -0
- paddlex/inference/models/keypoint_detection/processors.py +367 -0
- paddlex/inference/models/keypoint_detection/result.py +197 -0
- paddlex/inference/models/m_3d_bev_detection/__init__.py +15 -0
- paddlex/inference/models/m_3d_bev_detection/predictor.py +303 -0
- paddlex/inference/models/m_3d_bev_detection/processors.py +990 -0
- paddlex/inference/models/m_3d_bev_detection/result.py +68 -0
- paddlex/inference/models/m_3d_bev_detection/visualizer_3d.py +169 -0
- paddlex/inference/models/multilingual_speech_recognition/__init__.py +15 -0
- paddlex/inference/models/multilingual_speech_recognition/predictor.py +137 -0
- paddlex/inference/models/multilingual_speech_recognition/processors.py +1933 -0
- paddlex/inference/models/multilingual_speech_recognition/result.py +21 -0
- paddlex/inference/models/object_detection/__init__.py +15 -0
- paddlex/inference/models/object_detection/predictor.py +344 -0
- paddlex/inference/models/object_detection/processors.py +885 -0
- paddlex/inference/models/object_detection/result.py +114 -0
- paddlex/inference/models/object_detection/utils.py +70 -0
- paddlex/inference/models/open_vocabulary_detection/__init__.py +15 -0
- paddlex/inference/models/open_vocabulary_detection/predictor.py +172 -0
- paddlex/inference/models/open_vocabulary_detection/processors/__init__.py +16 -0
- paddlex/inference/models/open_vocabulary_detection/processors/common.py +114 -0
- paddlex/inference/models/open_vocabulary_detection/processors/groundingdino_processors.py +496 -0
- paddlex/inference/models/open_vocabulary_detection/processors/yoloworld_processors.py +209 -0
- paddlex/inference/models/open_vocabulary_segmentation/__init__.py +15 -0
- paddlex/inference/models/open_vocabulary_segmentation/predictor.py +113 -0
- paddlex/inference/models/open_vocabulary_segmentation/processors/__init__.py +15 -0
- paddlex/inference/models/open_vocabulary_segmentation/processors/sam_processer.py +249 -0
- paddlex/inference/models/open_vocabulary_segmentation/results/__init__.py +15 -0
- paddlex/inference/models/open_vocabulary_segmentation/results/sam_result.py +149 -0
- paddlex/inference/models/semantic_segmentation/__init__.py +15 -0
- paddlex/inference/models/semantic_segmentation/predictor.py +158 -0
- paddlex/inference/models/semantic_segmentation/processors.py +117 -0
- paddlex/inference/models/semantic_segmentation/result.py +73 -0
- paddlex/inference/models/table_structure_recognition/__init__.py +15 -0
- paddlex/inference/models/table_structure_recognition/predictor.py +161 -0
- paddlex/inference/models/table_structure_recognition/processors.py +229 -0
- paddlex/inference/models/table_structure_recognition/result.py +63 -0
- paddlex/inference/models/text_detection/__init__.py +15 -0
- paddlex/inference/models/text_detection/predictor.py +191 -0
- paddlex/inference/models/text_detection/processors.py +538 -0
- paddlex/inference/models/text_detection/result.py +46 -0
- paddlex/inference/models/text_recognition/__init__.py +15 -0
- paddlex/inference/models/text_recognition/predictor.py +98 -0
- paddlex/inference/models/text_recognition/processors.py +245 -0
- paddlex/inference/models/text_recognition/result.py +76 -0
- paddlex/inference/models/ts_anomaly_detection/__init__.py +15 -0
- paddlex/inference/models/ts_anomaly_detection/predictor.py +141 -0
- paddlex/inference/models/ts_anomaly_detection/processors.py +98 -0
- paddlex/inference/models/ts_anomaly_detection/result.py +83 -0
- paddlex/inference/models/ts_classification/__init__.py +15 -0
- paddlex/inference/models/ts_classification/predictor.py +122 -0
- paddlex/inference/models/ts_classification/processors.py +122 -0
- paddlex/inference/models/ts_classification/result.py +87 -0
- paddlex/inference/models/ts_forecasting/__init__.py +15 -0
- paddlex/inference/models/ts_forecasting/predictor.py +154 -0
- paddlex/inference/models/ts_forecasting/processors.py +158 -0
- paddlex/inference/models/ts_forecasting/result.py +96 -0
- paddlex/inference/models/video_classification/__init__.py +15 -0
- paddlex/inference/models/video_classification/predictor.py +141 -0
- paddlex/inference/models/video_classification/processors.py +409 -0
- paddlex/inference/models/video_classification/result.py +96 -0
- paddlex/inference/models/video_detection/__init__.py +15 -0
- paddlex/inference/models/video_detection/predictor.py +129 -0
- paddlex/inference/models/video_detection/processors.py +463 -0
- paddlex/inference/models/video_detection/result.py +109 -0
- paddlex/inference/pipelines/__init__.py +239 -0
- paddlex/inference/pipelines/_parallel.py +172 -0
- paddlex/inference/pipelines/anomaly_detection/__init__.py +15 -0
- paddlex/inference/pipelines/anomaly_detection/pipeline.py +82 -0
- paddlex/inference/pipelines/attribute_recognition/__init__.py +15 -0
- paddlex/inference/pipelines/attribute_recognition/pipeline.py +120 -0
- paddlex/inference/pipelines/attribute_recognition/result.py +102 -0
- paddlex/inference/pipelines/base.py +156 -0
- paddlex/inference/pipelines/components/__init__.py +29 -0
- paddlex/inference/pipelines/components/chat_server/__init__.py +16 -0
- paddlex/inference/pipelines/components/chat_server/base.py +39 -0
- paddlex/inference/pipelines/components/chat_server/openai_bot_chat.py +236 -0
- paddlex/inference/pipelines/components/common/__init__.py +19 -0
- paddlex/inference/pipelines/components/common/base_operator.py +37 -0
- paddlex/inference/pipelines/components/common/base_result.py +66 -0
- paddlex/inference/pipelines/components/common/convert_points_and_boxes.py +45 -0
- paddlex/inference/pipelines/components/common/crop_image_regions.py +556 -0
- paddlex/inference/pipelines/components/common/seal_det_warp.py +972 -0
- paddlex/inference/pipelines/components/common/sort_boxes.py +85 -0
- paddlex/inference/pipelines/components/common/warp_image.py +50 -0
- paddlex/inference/pipelines/components/faisser.py +357 -0
- paddlex/inference/pipelines/components/prompt_engineering/__init__.py +16 -0
- paddlex/inference/pipelines/components/prompt_engineering/base.py +35 -0
- paddlex/inference/pipelines/components/prompt_engineering/generate_ensemble_prompt.py +128 -0
- paddlex/inference/pipelines/components/prompt_engineering/generate_kie_prompt.py +148 -0
- paddlex/inference/pipelines/components/retriever/__init__.py +16 -0
- paddlex/inference/pipelines/components/retriever/base.py +228 -0
- paddlex/inference/pipelines/components/retriever/openai_bot_retriever.py +70 -0
- paddlex/inference/pipelines/components/retriever/qianfan_bot_retriever.py +166 -0
- paddlex/inference/pipelines/components/utils/__init__.py +13 -0
- paddlex/inference/pipelines/components/utils/mixin.py +206 -0
- paddlex/inference/pipelines/doc_preprocessor/__init__.py +15 -0
- paddlex/inference/pipelines/doc_preprocessor/pipeline.py +209 -0
- paddlex/inference/pipelines/doc_preprocessor/result.py +98 -0
- paddlex/inference/pipelines/doc_understanding/__init__.py +15 -0
- paddlex/inference/pipelines/doc_understanding/pipeline.py +71 -0
- paddlex/inference/pipelines/face_recognition/__init__.py +15 -0
- paddlex/inference/pipelines/face_recognition/pipeline.py +63 -0
- paddlex/inference/pipelines/face_recognition/result.py +44 -0
- paddlex/inference/pipelines/formula_recognition/__init__.py +15 -0
- paddlex/inference/pipelines/formula_recognition/pipeline.py +347 -0
- paddlex/inference/pipelines/formula_recognition/result.py +282 -0
- paddlex/inference/pipelines/image_classification/__init__.py +15 -0
- paddlex/inference/pipelines/image_classification/pipeline.py +90 -0
- paddlex/inference/pipelines/image_multilabel_classification/__init__.py +15 -0
- paddlex/inference/pipelines/image_multilabel_classification/pipeline.py +97 -0
- paddlex/inference/pipelines/instance_segmentation/__init__.py +15 -0
- paddlex/inference/pipelines/instance_segmentation/pipeline.py +91 -0
- paddlex/inference/pipelines/keypoint_detection/__init__.py +15 -0
- paddlex/inference/pipelines/keypoint_detection/pipeline.py +158 -0
- paddlex/inference/pipelines/layout_parsing/__init__.py +16 -0
- paddlex/inference/pipelines/layout_parsing/pipeline.py +568 -0
- paddlex/inference/pipelines/layout_parsing/pipeline_v2.py +1382 -0
- paddlex/inference/pipelines/layout_parsing/result.py +191 -0
- paddlex/inference/pipelines/layout_parsing/result_v2.py +745 -0
- paddlex/inference/pipelines/layout_parsing/setting.py +87 -0
- paddlex/inference/pipelines/layout_parsing/utils.py +951 -0
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/__init__.py +16 -0
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/utils.py +1143 -0
- paddlex/inference/pipelines/layout_parsing/xycut_enhanced/xycuts.py +562 -0
- paddlex/inference/pipelines/m_3d_bev_detection/__init__.py +15 -0
- paddlex/inference/pipelines/m_3d_bev_detection/pipeline.py +74 -0
- paddlex/inference/pipelines/multilingual_speech_recognition/__init__.py +15 -0
- paddlex/inference/pipelines/multilingual_speech_recognition/pipeline.py +78 -0
- paddlex/inference/pipelines/object_detection/__init__.py +15 -0
- paddlex/inference/pipelines/object_detection/pipeline.py +115 -0
- paddlex/inference/pipelines/ocr/__init__.py +15 -0
- paddlex/inference/pipelines/ocr/pipeline.py +463 -0
- paddlex/inference/pipelines/ocr/result.py +255 -0
- paddlex/inference/pipelines/open_vocabulary_detection/__init__.py +15 -0
- paddlex/inference/pipelines/open_vocabulary_detection/pipeline.py +86 -0
- paddlex/inference/pipelines/open_vocabulary_segmentation/__init__.py +15 -0
- paddlex/inference/pipelines/open_vocabulary_segmentation/pipeline.py +100 -0
- paddlex/inference/pipelines/pp_chatocr/__init__.py +16 -0
- paddlex/inference/pipelines/pp_chatocr/pipeline_base.py +111 -0
- paddlex/inference/pipelines/pp_chatocr/pipeline_v3.py +781 -0
- paddlex/inference/pipelines/pp_chatocr/pipeline_v4.py +992 -0
- paddlex/inference/pipelines/pp_shitu_v2/__init__.py +15 -0
- paddlex/inference/pipelines/pp_shitu_v2/pipeline.py +156 -0
- paddlex/inference/pipelines/pp_shitu_v2/result.py +126 -0
- paddlex/inference/pipelines/rotated_object_detection/__init__.py +15 -0
- paddlex/inference/pipelines/rotated_object_detection/pipeline.py +95 -0
- paddlex/inference/pipelines/seal_recognition/__init__.py +15 -0
- paddlex/inference/pipelines/seal_recognition/pipeline.py +335 -0
- paddlex/inference/pipelines/seal_recognition/result.py +89 -0
- paddlex/inference/pipelines/semantic_segmentation/__init__.py +15 -0
- paddlex/inference/pipelines/semantic_segmentation/pipeline.py +95 -0
- paddlex/inference/pipelines/small_object_detection/__init__.py +15 -0
- paddlex/inference/pipelines/small_object_detection/pipeline.py +95 -0
- paddlex/inference/pipelines/table_recognition/__init__.py +16 -0
- paddlex/inference/pipelines/table_recognition/pipeline.py +486 -0
- paddlex/inference/pipelines/table_recognition/pipeline_v2.py +1395 -0
- paddlex/inference/pipelines/table_recognition/result.py +218 -0
- paddlex/inference/pipelines/table_recognition/table_recognition_post_processing.py +366 -0
- paddlex/inference/pipelines/table_recognition/table_recognition_post_processing_v2.py +488 -0
- paddlex/inference/pipelines/table_recognition/utils.py +44 -0
- paddlex/inference/pipelines/ts_anomaly_detection/__init__.py +15 -0
- paddlex/inference/pipelines/ts_anomaly_detection/pipeline.py +72 -0
- paddlex/inference/pipelines/ts_classification/__init__.py +15 -0
- paddlex/inference/pipelines/ts_classification/pipeline.py +72 -0
- paddlex/inference/pipelines/ts_forecasting/__init__.py +15 -0
- paddlex/inference/pipelines/ts_forecasting/pipeline.py +72 -0
- paddlex/inference/pipelines/video_classification/__init__.py +15 -0
- paddlex/inference/pipelines/video_classification/pipeline.py +79 -0
- paddlex/inference/pipelines/video_detection/__init__.py +15 -0
- paddlex/inference/pipelines/video_detection/pipeline.py +86 -0
- paddlex/inference/serving/__init__.py +17 -0
- paddlex/inference/serving/basic_serving/__init__.py +18 -0
- paddlex/inference/serving/basic_serving/_app.py +221 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/__init__.py +44 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/_common/__init__.py +13 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/_common/common.py +104 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/_common/image_recognition.py +36 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/_common/ocr.py +95 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/anomaly_detection.py +67 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/doc_preprocessor.py +100 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/doc_understanding.py +153 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/face_recognition.py +226 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/formula_recognition.py +100 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/human_keypoint_detection.py +81 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/image_classification.py +69 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/image_multilabel_classification.py +73 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/instance_segmentation.py +87 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/layout_parsing.py +117 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/m_3d_bev_detection.py +79 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/multilingual_speech_recognition.py +92 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/object_detection.py +77 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/ocr.py +102 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/open_vocabulary_detection.py +81 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/open_vocabulary_segmentation.py +91 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/pedestrian_attribute_recognition.py +84 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv3_doc.py +193 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv4_doc.py +223 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_shituv2.py +221 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/pp_structurev3.py +143 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/rotated_object_detection.py +81 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/seal_recognition.py +106 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/semantic_segmentation.py +67 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/small_object_detection.py +72 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition.py +108 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition_v2.py +113 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/ts_anomaly_detection.py +65 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/ts_classification.py +64 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/ts_forecast.py +65 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/vehicle_attribute_recognition.py +84 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/video_classification.py +76 -0
- paddlex/inference/serving/basic_serving/_pipeline_apps/video_detection.py +92 -0
- paddlex/inference/serving/basic_serving/_server.py +40 -0
- paddlex/inference/serving/infra/__init__.py +13 -0
- paddlex/inference/serving/infra/config.py +36 -0
- paddlex/inference/serving/infra/models.py +79 -0
- paddlex/inference/serving/infra/storage.py +180 -0
- paddlex/inference/serving/infra/utils.py +285 -0
- paddlex/inference/serving/schemas/__init__.py +13 -0
- paddlex/inference/serving/schemas/anomaly_detection.py +39 -0
- paddlex/inference/serving/schemas/doc_preprocessor.py +54 -0
- paddlex/inference/serving/schemas/doc_understanding.py +78 -0
- paddlex/inference/serving/schemas/face_recognition.py +124 -0
- paddlex/inference/serving/schemas/formula_recognition.py +56 -0
- paddlex/inference/serving/schemas/human_keypoint_detection.py +55 -0
- paddlex/inference/serving/schemas/image_classification.py +45 -0
- paddlex/inference/serving/schemas/image_multilabel_classification.py +47 -0
- paddlex/inference/serving/schemas/instance_segmentation.py +53 -0
- paddlex/inference/serving/schemas/layout_parsing.py +71 -0
- paddlex/inference/serving/schemas/m_3d_bev_detection.py +48 -0
- paddlex/inference/serving/schemas/multilingual_speech_recognition.py +57 -0
- paddlex/inference/serving/schemas/object_detection.py +52 -0
- paddlex/inference/serving/schemas/ocr.py +60 -0
- paddlex/inference/serving/schemas/open_vocabulary_detection.py +52 -0
- paddlex/inference/serving/schemas/open_vocabulary_segmentation.py +52 -0
- paddlex/inference/serving/schemas/pedestrian_attribute_recognition.py +61 -0
- paddlex/inference/serving/schemas/pp_chatocrv3_doc.py +133 -0
- paddlex/inference/serving/schemas/pp_chatocrv4_doc.py +150 -0
- paddlex/inference/serving/schemas/pp_shituv2.py +124 -0
- paddlex/inference/serving/schemas/pp_structurev3.py +88 -0
- paddlex/inference/serving/schemas/rotated_object_detection.py +52 -0
- paddlex/inference/serving/schemas/seal_recognition.py +62 -0
- paddlex/inference/serving/schemas/semantic_segmentation.py +45 -0
- paddlex/inference/serving/schemas/shared/__init__.py +13 -0
- paddlex/inference/serving/schemas/shared/classification.py +23 -0
- paddlex/inference/serving/schemas/shared/image_segmentation.py +28 -0
- paddlex/inference/serving/schemas/shared/object_detection.py +24 -0
- paddlex/inference/serving/schemas/shared/ocr.py +25 -0
- paddlex/inference/serving/schemas/small_object_detection.py +52 -0
- paddlex/inference/serving/schemas/table_recognition.py +64 -0
- paddlex/inference/serving/schemas/table_recognition_v2.py +69 -0
- paddlex/inference/serving/schemas/ts_anomaly_detection.py +37 -0
- paddlex/inference/serving/schemas/ts_classification.py +38 -0
- paddlex/inference/serving/schemas/ts_forecast.py +37 -0
- paddlex/inference/serving/schemas/vehicle_attribute_recognition.py +61 -0
- paddlex/inference/serving/schemas/video_classification.py +44 -0
- paddlex/inference/serving/schemas/video_detection.py +56 -0
- paddlex/inference/utils/__init__.py +13 -0
- paddlex/inference/utils/benchmark.py +379 -0
- paddlex/inference/utils/color_map.py +123 -0
- paddlex/inference/utils/get_pipeline_path.py +27 -0
- paddlex/inference/utils/hpi.py +254 -0
- paddlex/inference/utils/hpi_model_info_collection.json +2331 -0
- paddlex/inference/utils/io/__init__.py +36 -0
- paddlex/inference/utils/io/readers.py +504 -0
- paddlex/inference/utils/io/style.py +381 -0
- paddlex/inference/utils/io/tablepyxl.py +157 -0
- paddlex/inference/utils/io/writers.py +458 -0
- paddlex/inference/utils/model_paths.py +48 -0
- paddlex/inference/utils/new_ir_blocklist.py +27 -0
- paddlex/inference/utils/official_models.py +367 -0
- paddlex/inference/utils/pp_option.py +339 -0
- paddlex/inference/utils/trt_blocklist.py +43 -0
- paddlex/inference/utils/trt_config.py +420 -0
- paddlex/model.py +131 -0
- paddlex/modules/__init__.py +115 -0
- paddlex/modules/anomaly_detection/__init__.py +18 -0
- paddlex/modules/anomaly_detection/dataset_checker/__init__.py +94 -0
- paddlex/modules/anomaly_detection/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/anomaly_detection/dataset_checker/dataset_src/analyse_dataset.py +82 -0
- paddlex/modules/anomaly_detection/dataset_checker/dataset_src/check_dataset.py +91 -0
- paddlex/modules/anomaly_detection/dataset_checker/dataset_src/convert_dataset.py +233 -0
- paddlex/modules/anomaly_detection/dataset_checker/dataset_src/split_dataset.py +87 -0
- paddlex/modules/anomaly_detection/dataset_checker/dataset_src/utils/__init__.py +13 -0
- paddlex/modules/anomaly_detection/dataset_checker/dataset_src/utils/visualizer.py +76 -0
- paddlex/modules/anomaly_detection/evaluator.py +58 -0
- paddlex/modules/anomaly_detection/exportor.py +22 -0
- paddlex/modules/anomaly_detection/model_list.py +16 -0
- paddlex/modules/anomaly_detection/trainer.py +70 -0
- paddlex/modules/base/__init__.py +18 -0
- paddlex/modules/base/build_model.py +33 -0
- paddlex/modules/base/dataset_checker/__init__.py +16 -0
- paddlex/modules/base/dataset_checker/dataset_checker.py +169 -0
- paddlex/modules/base/dataset_checker/utils.py +108 -0
- paddlex/modules/base/evaluator.py +170 -0
- paddlex/modules/base/exportor.py +145 -0
- paddlex/modules/base/trainer.py +144 -0
- paddlex/modules/base/utils/__init__.py +13 -0
- paddlex/modules/base/utils/cinn_setting.py +89 -0
- paddlex/modules/base/utils/coco_eval.py +94 -0
- paddlex/modules/base/utils/topk_eval.py +118 -0
- paddlex/modules/doc_vlm/__init__.py +18 -0
- paddlex/modules/doc_vlm/dataset_checker.py +29 -0
- paddlex/modules/doc_vlm/evaluator.py +29 -0
- paddlex/modules/doc_vlm/exportor.py +29 -0
- paddlex/modules/doc_vlm/model_list.py +16 -0
- paddlex/modules/doc_vlm/trainer.py +41 -0
- paddlex/modules/face_recognition/__init__.py +18 -0
- paddlex/modules/face_recognition/dataset_checker/__init__.py +71 -0
- paddlex/modules/face_recognition/dataset_checker/dataset_src/__init__.py +16 -0
- paddlex/modules/face_recognition/dataset_checker/dataset_src/check_dataset.py +172 -0
- paddlex/modules/face_recognition/dataset_checker/dataset_src/utils/__init__.py +13 -0
- paddlex/modules/face_recognition/dataset_checker/dataset_src/utils/visualizer.py +153 -0
- paddlex/modules/face_recognition/evaluator.py +52 -0
- paddlex/modules/face_recognition/exportor.py +22 -0
- paddlex/modules/face_recognition/model_list.py +15 -0
- paddlex/modules/face_recognition/trainer.py +75 -0
- paddlex/modules/formula_recognition/__init__.py +18 -0
- paddlex/modules/formula_recognition/dataset_checker/__init__.py +113 -0
- paddlex/modules/formula_recognition/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/formula_recognition/dataset_checker/dataset_src/analyse_dataset.py +158 -0
- paddlex/modules/formula_recognition/dataset_checker/dataset_src/check_dataset.py +76 -0
- paddlex/modules/formula_recognition/dataset_checker/dataset_src/convert_dataset.py +95 -0
- paddlex/modules/formula_recognition/dataset_checker/dataset_src/split_dataset.py +80 -0
- paddlex/modules/formula_recognition/evaluator.py +80 -0
- paddlex/modules/formula_recognition/exportor.py +22 -0
- paddlex/modules/formula_recognition/model_list.py +23 -0
- paddlex/modules/formula_recognition/trainer.py +123 -0
- paddlex/modules/general_recognition/__init__.py +18 -0
- paddlex/modules/general_recognition/dataset_checker/__init__.py +107 -0
- paddlex/modules/general_recognition/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/general_recognition/dataset_checker/dataset_src/analyse_dataset.py +96 -0
- paddlex/modules/general_recognition/dataset_checker/dataset_src/check_dataset.py +99 -0
- paddlex/modules/general_recognition/dataset_checker/dataset_src/convert_dataset.py +100 -0
- paddlex/modules/general_recognition/dataset_checker/dataset_src/split_dataset.py +82 -0
- paddlex/modules/general_recognition/dataset_checker/dataset_src/utils/__init__.py +13 -0
- paddlex/modules/general_recognition/dataset_checker/dataset_src/utils/visualizer.py +147 -0
- paddlex/modules/general_recognition/evaluator.py +31 -0
- paddlex/modules/general_recognition/exportor.py +22 -0
- paddlex/modules/general_recognition/model_list.py +19 -0
- paddlex/modules/general_recognition/trainer.py +52 -0
- paddlex/modules/image_classification/__init__.py +18 -0
- paddlex/modules/image_classification/dataset_checker/__init__.py +104 -0
- paddlex/modules/image_classification/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/image_classification/dataset_checker/dataset_src/analyse_dataset.py +92 -0
- paddlex/modules/image_classification/dataset_checker/dataset_src/check_dataset.py +132 -0
- paddlex/modules/image_classification/dataset_checker/dataset_src/convert_dataset.py +51 -0
- paddlex/modules/image_classification/dataset_checker/dataset_src/split_dataset.py +81 -0
- paddlex/modules/image_classification/dataset_checker/dataset_src/utils/__init__.py +13 -0
- paddlex/modules/image_classification/dataset_checker/dataset_src/utils/visualizer.py +153 -0
- paddlex/modules/image_classification/evaluator.py +43 -0
- paddlex/modules/image_classification/exportor.py +22 -0
- paddlex/modules/image_classification/model_list.py +99 -0
- paddlex/modules/image_classification/trainer.py +82 -0
- paddlex/modules/image_unwarping/__init__.py +13 -0
- paddlex/modules/image_unwarping/model_list.py +17 -0
- paddlex/modules/instance_segmentation/__init__.py +18 -0
- paddlex/modules/instance_segmentation/dataset_checker/__init__.py +107 -0
- paddlex/modules/instance_segmentation/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/instance_segmentation/dataset_checker/dataset_src/analyse_dataset.py +82 -0
- paddlex/modules/instance_segmentation/dataset_checker/dataset_src/check_dataset.py +95 -0
- paddlex/modules/instance_segmentation/dataset_checker/dataset_src/convert_dataset.py +241 -0
- paddlex/modules/instance_segmentation/dataset_checker/dataset_src/split_dataset.py +122 -0
- paddlex/modules/instance_segmentation/dataset_checker/dataset_src/utils/__init__.py +13 -0
- paddlex/modules/instance_segmentation/dataset_checker/dataset_src/utils/visualizer.py +223 -0
- paddlex/modules/instance_segmentation/evaluator.py +32 -0
- paddlex/modules/instance_segmentation/exportor.py +22 -0
- paddlex/modules/instance_segmentation/model_list.py +33 -0
- paddlex/modules/instance_segmentation/trainer.py +31 -0
- paddlex/modules/keypoint_detection/__init__.py +18 -0
- paddlex/modules/keypoint_detection/dataset_checker/__init__.py +56 -0
- paddlex/modules/keypoint_detection/dataset_checker/dataset_src/__init__.py +15 -0
- paddlex/modules/keypoint_detection/dataset_checker/dataset_src/check_dataset.py +91 -0
- paddlex/modules/keypoint_detection/dataset_checker/dataset_src/utils/__init__.py +13 -0
- paddlex/modules/keypoint_detection/dataset_checker/dataset_src/utils/visualizer.py +124 -0
- paddlex/modules/keypoint_detection/evaluator.py +41 -0
- paddlex/modules/keypoint_detection/exportor.py +22 -0
- paddlex/modules/keypoint_detection/model_list.py +16 -0
- paddlex/modules/keypoint_detection/trainer.py +39 -0
- paddlex/modules/m_3d_bev_detection/__init__.py +18 -0
- paddlex/modules/m_3d_bev_detection/dataset_checker/__init__.py +95 -0
- paddlex/modules/m_3d_bev_detection/dataset_checker/dataset_src/__init__.py +17 -0
- paddlex/modules/m_3d_bev_detection/dataset_checker/dataset_src/analyse_dataset.py +106 -0
- paddlex/modules/m_3d_bev_detection/dataset_checker/dataset_src/check_dataset.py +101 -0
- paddlex/modules/m_3d_bev_detection/evaluator.py +46 -0
- paddlex/modules/m_3d_bev_detection/exportor.py +22 -0
- paddlex/modules/m_3d_bev_detection/model_list.py +18 -0
- paddlex/modules/m_3d_bev_detection/trainer.py +68 -0
- paddlex/modules/multilabel_classification/__init__.py +18 -0
- paddlex/modules/multilabel_classification/dataset_checker/__init__.py +106 -0
- paddlex/modules/multilabel_classification/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/multilabel_classification/dataset_checker/dataset_src/analyse_dataset.py +94 -0
- paddlex/modules/multilabel_classification/dataset_checker/dataset_src/check_dataset.py +132 -0
- paddlex/modules/multilabel_classification/dataset_checker/dataset_src/convert_dataset.py +120 -0
- paddlex/modules/multilabel_classification/dataset_checker/dataset_src/split_dataset.py +81 -0
- paddlex/modules/multilabel_classification/dataset_checker/dataset_src/utils/__init__.py +13 -0
- paddlex/modules/multilabel_classification/dataset_checker/dataset_src/utils/visualizer.py +149 -0
- paddlex/modules/multilabel_classification/evaluator.py +43 -0
- paddlex/modules/multilabel_classification/exportor.py +22 -0
- paddlex/modules/multilabel_classification/model_list.py +24 -0
- paddlex/modules/multilabel_classification/trainer.py +85 -0
- paddlex/modules/multilingual_speech_recognition/__init__.py +18 -0
- paddlex/modules/multilingual_speech_recognition/dataset_checker.py +27 -0
- paddlex/modules/multilingual_speech_recognition/evaluator.py +27 -0
- paddlex/modules/multilingual_speech_recognition/exportor.py +27 -0
- paddlex/modules/multilingual_speech_recognition/model_list.py +22 -0
- paddlex/modules/multilingual_speech_recognition/trainer.py +42 -0
- paddlex/modules/object_detection/__init__.py +18 -0
- paddlex/modules/object_detection/dataset_checker/__init__.py +106 -0
- paddlex/modules/object_detection/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/object_detection/dataset_checker/dataset_src/analyse_dataset.py +82 -0
- paddlex/modules/object_detection/dataset_checker/dataset_src/check_dataset.py +91 -0
- paddlex/modules/object_detection/dataset_checker/dataset_src/convert_dataset.py +438 -0
- paddlex/modules/object_detection/dataset_checker/dataset_src/split_dataset.py +123 -0
- paddlex/modules/object_detection/dataset_checker/dataset_src/utils/__init__.py +13 -0
- paddlex/modules/object_detection/dataset_checker/dataset_src/utils/visualizer.py +193 -0
- paddlex/modules/object_detection/evaluator.py +57 -0
- paddlex/modules/object_detection/exportor.py +22 -0
- paddlex/modules/object_detection/model_list.py +86 -0
- paddlex/modules/object_detection/trainer.py +98 -0
- paddlex/modules/open_vocabulary_detection/__init__.py +18 -0
- paddlex/modules/open_vocabulary_detection/dataset_checker.py +29 -0
- paddlex/modules/open_vocabulary_detection/evaluator.py +29 -0
- paddlex/modules/open_vocabulary_detection/exportor.py +29 -0
- paddlex/modules/open_vocabulary_detection/model_list.py +16 -0
- paddlex/modules/open_vocabulary_detection/trainer.py +44 -0
- paddlex/modules/open_vocabulary_segmentation/__init__.py +18 -0
- paddlex/modules/open_vocabulary_segmentation/dataset_checker.py +29 -0
- paddlex/modules/open_vocabulary_segmentation/evaluator.py +29 -0
- paddlex/modules/open_vocabulary_segmentation/exportor.py +29 -0
- paddlex/modules/open_vocabulary_segmentation/model_list.py +19 -0
- paddlex/modules/open_vocabulary_segmentation/trainer.py +44 -0
- paddlex/modules/semantic_segmentation/__init__.py +18 -0
- paddlex/modules/semantic_segmentation/dataset_checker/__init__.py +109 -0
- paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/analyse_dataset.py +76 -0
- paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/check_dataset.py +80 -0
- paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/convert_dataset.py +165 -0
- paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/split_dataset.py +87 -0
- paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/utils/__init__.py +13 -0
- paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/utils/visualizer.py +75 -0
- paddlex/modules/semantic_segmentation/evaluator.py +58 -0
- paddlex/modules/semantic_segmentation/exportor.py +31 -0
- paddlex/modules/semantic_segmentation/model_list.py +37 -0
- paddlex/modules/semantic_segmentation/trainer.py +72 -0
- paddlex/modules/table_recognition/__init__.py +18 -0
- paddlex/modules/table_recognition/dataset_checker/__init__.py +98 -0
- paddlex/modules/table_recognition/dataset_checker/dataset_src/__init__.py +18 -0
- paddlex/modules/table_recognition/dataset_checker/dataset_src/analyse_dataset.py +59 -0
- paddlex/modules/table_recognition/dataset_checker/dataset_src/check_dataset.py +87 -0
- paddlex/modules/table_recognition/dataset_checker/dataset_src/split_dataset.py +80 -0
- paddlex/modules/table_recognition/evaluator.py +43 -0
- paddlex/modules/table_recognition/exportor.py +22 -0
- paddlex/modules/table_recognition/model_list.py +21 -0
- paddlex/modules/table_recognition/trainer.py +67 -0
- paddlex/modules/text_detection/__init__.py +18 -0
- paddlex/modules/text_detection/dataset_checker/__init__.py +107 -0
- paddlex/modules/text_detection/dataset_checker/dataset_src/__init__.py +18 -0
- paddlex/modules/text_detection/dataset_checker/dataset_src/analyse_dataset.py +220 -0
- paddlex/modules/text_detection/dataset_checker/dataset_src/check_dataset.py +106 -0
- paddlex/modules/text_detection/dataset_checker/dataset_src/split_dataset.py +140 -0
- paddlex/modules/text_detection/evaluator.py +41 -0
- paddlex/modules/text_detection/exportor.py +22 -0
- paddlex/modules/text_detection/model_list.py +26 -0
- paddlex/modules/text_detection/trainer.py +65 -0
- paddlex/modules/text_recognition/__init__.py +18 -0
- paddlex/modules/text_recognition/dataset_checker/__init__.py +125 -0
- paddlex/modules/text_recognition/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/text_recognition/dataset_checker/dataset_src/analyse_dataset.py +162 -0
- paddlex/modules/text_recognition/dataset_checker/dataset_src/check_dataset.py +104 -0
- paddlex/modules/text_recognition/dataset_checker/dataset_src/convert_dataset.py +95 -0
- paddlex/modules/text_recognition/dataset_checker/dataset_src/split_dataset.py +80 -0
- paddlex/modules/text_recognition/evaluator.py +64 -0
- paddlex/modules/text_recognition/exportor.py +22 -0
- paddlex/modules/text_recognition/model_list.py +36 -0
- paddlex/modules/text_recognition/trainer.py +105 -0
- paddlex/modules/ts_anomaly_detection/__init__.py +19 -0
- paddlex/modules/ts_anomaly_detection/dataset_checker/__init__.py +111 -0
- paddlex/modules/ts_anomaly_detection/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/ts_anomaly_detection/dataset_checker/dataset_src/analyse_dataset.py +19 -0
- paddlex/modules/ts_anomaly_detection/dataset_checker/dataset_src/check_dataset.py +64 -0
- paddlex/modules/ts_anomaly_detection/dataset_checker/dataset_src/convert_dataset.py +74 -0
- paddlex/modules/ts_anomaly_detection/dataset_checker/dataset_src/split_dataset.py +63 -0
- paddlex/modules/ts_anomaly_detection/evaluator.py +67 -0
- paddlex/modules/ts_anomaly_detection/exportor.py +44 -0
- paddlex/modules/ts_anomaly_detection/model_list.py +22 -0
- paddlex/modules/ts_anomaly_detection/trainer.py +113 -0
- paddlex/modules/ts_classification/__init__.py +19 -0
- paddlex/modules/ts_classification/dataset_checker/__init__.py +111 -0
- paddlex/modules/ts_classification/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/ts_classification/dataset_checker/dataset_src/analyse_dataset.py +77 -0
- paddlex/modules/ts_classification/dataset_checker/dataset_src/check_dataset.py +64 -0
- paddlex/modules/ts_classification/dataset_checker/dataset_src/convert_dataset.py +74 -0
- paddlex/modules/ts_classification/dataset_checker/dataset_src/split_dataset.py +88 -0
- paddlex/modules/ts_classification/evaluator.py +66 -0
- paddlex/modules/ts_classification/exportor.py +44 -0
- paddlex/modules/ts_classification/model_list.py +18 -0
- paddlex/modules/ts_classification/trainer.py +108 -0
- paddlex/modules/ts_forecast/__init__.py +19 -0
- paddlex/modules/ts_forecast/dataset_checker/__init__.py +111 -0
- paddlex/modules/ts_forecast/dataset_checker/dataset_src/__init__.py +19 -0
- paddlex/modules/ts_forecast/dataset_checker/dataset_src/analyse_dataset.py +19 -0
- paddlex/modules/ts_forecast/dataset_checker/dataset_src/check_dataset.py +64 -0
- paddlex/modules/ts_forecast/dataset_checker/dataset_src/convert_dataset.py +73 -0
- paddlex/modules/ts_forecast/dataset_checker/dataset_src/split_dataset.py +63 -0
- paddlex/modules/ts_forecast/evaluator.py +66 -0
- paddlex/modules/ts_forecast/exportor.py +44 -0
- paddlex/modules/ts_forecast/model_list.py +24 -0
- paddlex/modules/ts_forecast/trainer.py +108 -0
- paddlex/modules/video_classification/__init__.py +18 -0
- paddlex/modules/video_classification/dataset_checker/__init__.py +93 -0
- paddlex/modules/video_classification/dataset_checker/dataset_src/__init__.py +18 -0
- paddlex/modules/video_classification/dataset_checker/dataset_src/analyse_dataset.py +93 -0
- paddlex/modules/video_classification/dataset_checker/dataset_src/check_dataset.py +120 -0
- paddlex/modules/video_classification/dataset_checker/dataset_src/split_dataset.py +82 -0
- paddlex/modules/video_classification/evaluator.py +44 -0
- paddlex/modules/video_classification/exportor.py +22 -0
- paddlex/modules/video_classification/model_list.py +19 -0
- paddlex/modules/video_classification/trainer.py +88 -0
- paddlex/modules/video_detection/__init__.py +18 -0
- paddlex/modules/video_detection/dataset_checker/__init__.py +86 -0
- paddlex/modules/video_detection/dataset_checker/dataset_src/__init__.py +17 -0
- paddlex/modules/video_detection/dataset_checker/dataset_src/analyse_dataset.py +100 -0
- paddlex/modules/video_detection/dataset_checker/dataset_src/check_dataset.py +132 -0
- paddlex/modules/video_detection/evaluator.py +42 -0
- paddlex/modules/video_detection/exportor.py +22 -0
- paddlex/modules/video_detection/model_list.py +15 -0
- paddlex/modules/video_detection/trainer.py +82 -0
- paddlex/ops/__init__.py +152 -0
- paddlex/ops/iou3d_nms/iou3d_cpu.cpp +266 -0
- paddlex/ops/iou3d_nms/iou3d_cpu.h +28 -0
- paddlex/ops/iou3d_nms/iou3d_nms.cpp +206 -0
- paddlex/ops/iou3d_nms/iou3d_nms.h +35 -0
- paddlex/ops/iou3d_nms/iou3d_nms_api.cpp +114 -0
- paddlex/ops/iou3d_nms/iou3d_nms_kernel.cu +484 -0
- paddlex/ops/setup.py +37 -0
- paddlex/ops/voxel/voxelize_op.cc +194 -0
- paddlex/ops/voxel/voxelize_op.cu +346 -0
- paddlex/paddlex_cli.py +476 -0
- paddlex/repo_apis/Paddle3D_api/__init__.py +17 -0
- paddlex/repo_apis/Paddle3D_api/bev_fusion/__init__.py +18 -0
- paddlex/repo_apis/Paddle3D_api/bev_fusion/config.py +118 -0
- paddlex/repo_apis/Paddle3D_api/bev_fusion/model.py +238 -0
- paddlex/repo_apis/Paddle3D_api/bev_fusion/register.py +55 -0
- paddlex/repo_apis/Paddle3D_api/bev_fusion/runner.py +104 -0
- paddlex/repo_apis/Paddle3D_api/pp3d_config.py +145 -0
- paddlex/repo_apis/PaddleClas_api/__init__.py +17 -0
- paddlex/repo_apis/PaddleClas_api/cls/__init__.py +19 -0
- paddlex/repo_apis/PaddleClas_api/cls/config.py +595 -0
- paddlex/repo_apis/PaddleClas_api/cls/model.py +355 -0
- paddlex/repo_apis/PaddleClas_api/cls/register.py +907 -0
- paddlex/repo_apis/PaddleClas_api/cls/runner.py +218 -0
- paddlex/repo_apis/PaddleClas_api/shitu_rec/__init__.py +18 -0
- paddlex/repo_apis/PaddleClas_api/shitu_rec/config.py +141 -0
- paddlex/repo_apis/PaddleClas_api/shitu_rec/model.py +20 -0
- paddlex/repo_apis/PaddleClas_api/shitu_rec/register.py +68 -0
- paddlex/repo_apis/PaddleClas_api/shitu_rec/runner.py +50 -0
- paddlex/repo_apis/PaddleDetection_api/__init__.py +17 -0
- paddlex/repo_apis/PaddleDetection_api/config_helper.py +280 -0
- paddlex/repo_apis/PaddleDetection_api/instance_seg/__init__.py +18 -0
- paddlex/repo_apis/PaddleDetection_api/instance_seg/config.py +457 -0
- paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py +403 -0
- paddlex/repo_apis/PaddleDetection_api/instance_seg/register.py +262 -0
- paddlex/repo_apis/PaddleDetection_api/instance_seg/runner.py +225 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/__init__.py +19 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/config.py +540 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/model.py +429 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/official_categories.py +245 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/register.py +1135 -0
- paddlex/repo_apis/PaddleDetection_api/object_det/runner.py +225 -0
- paddlex/repo_apis/PaddleNLP_api/__init__.py +13 -0
- paddlex/repo_apis/PaddleOCR_api/__init__.py +22 -0
- paddlex/repo_apis/PaddleOCR_api/config_utils.py +53 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/__init__.py +16 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/config.py +571 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/model.py +398 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/register.py +99 -0
- paddlex/repo_apis/PaddleOCR_api/formula_rec/runner.py +239 -0
- paddlex/repo_apis/PaddleOCR_api/table_rec/__init__.py +16 -0
- paddlex/repo_apis/PaddleOCR_api/table_rec/config.py +64 -0
- paddlex/repo_apis/PaddleOCR_api/table_rec/model.py +126 -0
- paddlex/repo_apis/PaddleOCR_api/table_rec/register.py +70 -0
- paddlex/repo_apis/PaddleOCR_api/table_rec/runner.py +51 -0
- paddlex/repo_apis/PaddleOCR_api/text_det/__init__.py +16 -0
- paddlex/repo_apis/PaddleOCR_api/text_det/config.py +62 -0
- paddlex/repo_apis/PaddleOCR_api/text_det/model.py +72 -0
- paddlex/repo_apis/PaddleOCR_api/text_det/register.py +107 -0
- paddlex/repo_apis/PaddleOCR_api/text_det/runner.py +53 -0
- paddlex/repo_apis/PaddleOCR_api/text_rec/__init__.py +16 -0
- paddlex/repo_apis/PaddleOCR_api/text_rec/config.py +564 -0
- paddlex/repo_apis/PaddleOCR_api/text_rec/model.py +398 -0
- paddlex/repo_apis/PaddleOCR_api/text_rec/register.py +216 -0
- paddlex/repo_apis/PaddleOCR_api/text_rec/runner.py +239 -0
- paddlex/repo_apis/PaddleSeg_api/__init__.py +16 -0
- paddlex/repo_apis/PaddleSeg_api/base_seg_config.py +134 -0
- paddlex/repo_apis/PaddleSeg_api/seg/__init__.py +16 -0
- paddlex/repo_apis/PaddleSeg_api/seg/config.py +183 -0
- paddlex/repo_apis/PaddleSeg_api/seg/model.py +491 -0
- paddlex/repo_apis/PaddleSeg_api/seg/register.py +272 -0
- paddlex/repo_apis/PaddleSeg_api/seg/runner.py +261 -0
- paddlex/repo_apis/PaddleTS_api/__init__.py +20 -0
- paddlex/repo_apis/PaddleTS_api/ts_ad/__init__.py +16 -0
- paddlex/repo_apis/PaddleTS_api/ts_ad/config.py +88 -0
- paddlex/repo_apis/PaddleTS_api/ts_ad/register.py +146 -0
- paddlex/repo_apis/PaddleTS_api/ts_ad/runner.py +158 -0
- paddlex/repo_apis/PaddleTS_api/ts_base/__init__.py +13 -0
- paddlex/repo_apis/PaddleTS_api/ts_base/config.py +244 -0
- paddlex/repo_apis/PaddleTS_api/ts_base/model.py +276 -0
- paddlex/repo_apis/PaddleTS_api/ts_base/runner.py +158 -0
- paddlex/repo_apis/PaddleTS_api/ts_cls/__init__.py +16 -0
- paddlex/repo_apis/PaddleTS_api/ts_cls/config.py +72 -0
- paddlex/repo_apis/PaddleTS_api/ts_cls/register.py +59 -0
- paddlex/repo_apis/PaddleTS_api/ts_cls/runner.py +158 -0
- paddlex/repo_apis/PaddleTS_api/ts_fc/__init__.py +16 -0
- paddlex/repo_apis/PaddleTS_api/ts_fc/config.py +136 -0
- paddlex/repo_apis/PaddleTS_api/ts_fc/register.py +186 -0
- paddlex/repo_apis/PaddleVideo_api/__init__.py +17 -0
- paddlex/repo_apis/PaddleVideo_api/config_utils.py +51 -0
- paddlex/repo_apis/PaddleVideo_api/video_cls/__init__.py +19 -0
- paddlex/repo_apis/PaddleVideo_api/video_cls/config.py +548 -0
- paddlex/repo_apis/PaddleVideo_api/video_cls/model.py +346 -0
- paddlex/repo_apis/PaddleVideo_api/video_cls/register.py +70 -0
- paddlex/repo_apis/PaddleVideo_api/video_cls/runner.py +204 -0
- paddlex/repo_apis/PaddleVideo_api/video_det/__init__.py +19 -0
- paddlex/repo_apis/PaddleVideo_api/video_det/config.py +549 -0
- paddlex/repo_apis/PaddleVideo_api/video_det/model.py +298 -0
- paddlex/repo_apis/PaddleVideo_api/video_det/register.py +44 -0
- paddlex/repo_apis/PaddleVideo_api/video_det/runner.py +199 -0
- paddlex/repo_apis/__init__.py +13 -0
- paddlex/repo_apis/base/__init__.py +22 -0
- paddlex/repo_apis/base/config.py +237 -0
- paddlex/repo_apis/base/model.py +563 -0
- paddlex/repo_apis/base/register.py +135 -0
- paddlex/repo_apis/base/runner.py +390 -0
- paddlex/repo_apis/base/utils/__init__.py +13 -0
- paddlex/repo_apis/base/utils/arg.py +64 -0
- paddlex/repo_apis/base/utils/subprocess.py +107 -0
- paddlex/repo_manager/__init__.py +17 -0
- paddlex/repo_manager/core.py +253 -0
- paddlex/repo_manager/meta.py +180 -0
- paddlex/repo_manager/repo.py +425 -0
- paddlex/repo_manager/utils.py +148 -0
- paddlex/utils/__init__.py +1 -12
- paddlex/utils/cache.py +146 -0
- paddlex/utils/config.py +216 -0
- paddlex/utils/custom_device_list.py +311 -0
- paddlex/utils/deps.py +249 -0
- paddlex/utils/device.py +195 -0
- paddlex/utils/download.py +168 -182
- paddlex/utils/env.py +31 -48
- paddlex/utils/errors/__init__.py +17 -0
- paddlex/utils/errors/dataset_checker.py +78 -0
- paddlex/utils/errors/others.py +138 -0
- paddlex/utils/file_interface.py +211 -0
- paddlex/utils/flags.py +70 -0
- paddlex/utils/fonts/__init__.py +97 -0
- paddlex/utils/func_register.py +41 -0
- paddlex/utils/install.py +87 -0
- paddlex/utils/interactive_get_pipeline.py +55 -0
- paddlex/utils/lazy_loader.py +68 -0
- paddlex/utils/logging.py +140 -33
- paddlex/utils/misc.py +201 -0
- paddlex/utils/pipeline_arguments.py +719 -0
- paddlex/utils/result_saver.py +58 -0
- paddlex/utils/subclass_register.py +99 -0
- paddlex/version.py +55 -0
- paddlex-3.0.0.dist-info/METADATA +1168 -0
- paddlex-3.0.0.dist-info/RECORD +1093 -0
- paddlex-3.0.0.dist-info/WHEEL +5 -0
- paddlex-3.0.0.dist-info/entry_points.txt +2 -0
- paddlex-3.0.0.dist-info/licenses/LICENSE +169 -0
- paddlex-3.0.0.dist-info/top_level.txt +1 -0
- PaddleClas/__init__.py +0 -16
- PaddleClas/paddleclas.py +0 -375
- PaddleClas/ppcls/__init__.py +0 -20
- PaddleClas/ppcls/data/__init__.py +0 -15
- PaddleClas/ppcls/data/imaug/__init__.py +0 -94
- PaddleClas/ppcls/data/imaug/autoaugment.py +0 -264
- PaddleClas/ppcls/data/imaug/batch_operators.py +0 -117
- PaddleClas/ppcls/data/imaug/cutout.py +0 -41
- PaddleClas/ppcls/data/imaug/fmix.py +0 -217
- PaddleClas/ppcls/data/imaug/grid.py +0 -89
- PaddleClas/ppcls/data/imaug/hide_and_seek.py +0 -44
- PaddleClas/ppcls/data/imaug/operators.py +0 -244
- PaddleClas/ppcls/data/imaug/randaugment.py +0 -106
- PaddleClas/ppcls/data/imaug/random_erasing.py +0 -55
- PaddleClas/ppcls/data/reader.py +0 -318
- PaddleClas/ppcls/modeling/__init__.py +0 -20
- PaddleClas/ppcls/modeling/architectures/__init__.py +0 -51
- PaddleClas/ppcls/modeling/architectures/alexnet.py +0 -132
- PaddleClas/ppcls/modeling/architectures/darknet.py +0 -161
- PaddleClas/ppcls/modeling/architectures/densenet.py +0 -308
- PaddleClas/ppcls/modeling/architectures/distillation_models.py +0 -65
- PaddleClas/ppcls/modeling/architectures/distilled_vision_transformer.py +0 -196
- PaddleClas/ppcls/modeling/architectures/dpn.py +0 -425
- PaddleClas/ppcls/modeling/architectures/efficientnet.py +0 -901
- PaddleClas/ppcls/modeling/architectures/ghostnet.py +0 -331
- PaddleClas/ppcls/modeling/architectures/googlenet.py +0 -207
- PaddleClas/ppcls/modeling/architectures/hrnet.py +0 -742
- PaddleClas/ppcls/modeling/architectures/inception_v3.py +0 -481
- PaddleClas/ppcls/modeling/architectures/inception_v4.py +0 -455
- PaddleClas/ppcls/modeling/architectures/mixnet.py +0 -782
- PaddleClas/ppcls/modeling/architectures/mobilenet_v1.py +0 -266
- PaddleClas/ppcls/modeling/architectures/mobilenet_v2.py +0 -248
- PaddleClas/ppcls/modeling/architectures/mobilenet_v3.py +0 -359
- PaddleClas/ppcls/modeling/architectures/regnet.py +0 -383
- PaddleClas/ppcls/modeling/architectures/repvgg.py +0 -339
- PaddleClas/ppcls/modeling/architectures/res2net.py +0 -272
- PaddleClas/ppcls/modeling/architectures/res2net_vd.py +0 -295
- PaddleClas/ppcls/modeling/architectures/resnest.py +0 -705
- PaddleClas/ppcls/modeling/architectures/resnet.py +0 -316
- PaddleClas/ppcls/modeling/architectures/resnet_vc.py +0 -309
- PaddleClas/ppcls/modeling/architectures/resnet_vd.py +0 -354
- PaddleClas/ppcls/modeling/architectures/resnext.py +0 -253
- PaddleClas/ppcls/modeling/architectures/resnext101_wsl.py +0 -447
- PaddleClas/ppcls/modeling/architectures/resnext_vd.py +0 -266
- PaddleClas/ppcls/modeling/architectures/rexnet.py +0 -240
- PaddleClas/ppcls/modeling/architectures/se_resnet_vd.py +0 -378
- PaddleClas/ppcls/modeling/architectures/se_resnext.py +0 -290
- PaddleClas/ppcls/modeling/architectures/se_resnext_vd.py +0 -285
- PaddleClas/ppcls/modeling/architectures/shufflenet_v2.py +0 -320
- PaddleClas/ppcls/modeling/architectures/squeezenet.py +0 -154
- PaddleClas/ppcls/modeling/architectures/vgg.py +0 -152
- PaddleClas/ppcls/modeling/architectures/vision_transformer.py +0 -402
- PaddleClas/ppcls/modeling/architectures/xception.py +0 -345
- PaddleClas/ppcls/modeling/architectures/xception_deeplab.py +0 -386
- PaddleClas/ppcls/modeling/loss.py +0 -154
- PaddleClas/ppcls/modeling/utils.py +0 -53
- PaddleClas/ppcls/optimizer/__init__.py +0 -19
- PaddleClas/ppcls/optimizer/learning_rate.py +0 -159
- PaddleClas/ppcls/optimizer/optimizer.py +0 -165
- PaddleClas/ppcls/utils/__init__.py +0 -27
- PaddleClas/ppcls/utils/check.py +0 -151
- PaddleClas/ppcls/utils/config.py +0 -201
- PaddleClas/ppcls/utils/logger.py +0 -120
- PaddleClas/ppcls/utils/metrics.py +0 -107
- PaddleClas/ppcls/utils/misc.py +0 -62
- PaddleClas/ppcls/utils/model_zoo.py +0 -213
- PaddleClas/ppcls/utils/save_load.py +0 -163
- PaddleClas/setup.py +0 -55
- PaddleClas/tools/__init__.py +0 -15
- PaddleClas/tools/download.py +0 -50
- PaddleClas/tools/ema.py +0 -58
- PaddleClas/tools/eval.py +0 -112
- PaddleClas/tools/export_model.py +0 -85
- PaddleClas/tools/export_serving_model.py +0 -76
- PaddleClas/tools/infer/__init__.py +0 -16
- PaddleClas/tools/infer/infer.py +0 -94
- PaddleClas/tools/infer/predict.py +0 -117
- PaddleClas/tools/infer/utils.py +0 -233
- PaddleClas/tools/program.py +0 -444
- PaddleClas/tools/test_hubserving.py +0 -113
- PaddleClas/tools/train.py +0 -141
- paddlex/cls.py +0 -76
- paddlex/command.py +0 -215
- paddlex/cv/__init__.py +0 -17
- paddlex/cv/datasets/__init__.py +0 -18
- paddlex/cv/datasets/coco.py +0 -169
- paddlex/cv/datasets/imagenet.py +0 -88
- paddlex/cv/datasets/seg_dataset.py +0 -91
- paddlex/cv/datasets/voc.py +0 -301
- paddlex/cv/models/__init__.py +0 -18
- paddlex/cv/models/base.py +0 -623
- paddlex/cv/models/classifier.py +0 -814
- paddlex/cv/models/detector.py +0 -1747
- paddlex/cv/models/load_model.py +0 -126
- paddlex/cv/models/segmenter.py +0 -673
- paddlex/cv/models/slim/__init__.py +0 -13
- paddlex/cv/models/slim/prune.py +0 -55
- paddlex/cv/models/utils/__init__.py +0 -13
- paddlex/cv/models/utils/det_metrics/__init__.py +0 -15
- paddlex/cv/models/utils/det_metrics/coco_utils.py +0 -217
- paddlex/cv/models/utils/det_metrics/metrics.py +0 -220
- paddlex/cv/models/utils/ema.py +0 -48
- paddlex/cv/models/utils/seg_metrics.py +0 -62
- paddlex/cv/models/utils/visualize.py +0 -394
- paddlex/cv/transforms/__init__.py +0 -46
- paddlex/cv/transforms/batch_operators.py +0 -286
- paddlex/cv/transforms/box_utils.py +0 -41
- paddlex/cv/transforms/functions.py +0 -193
- paddlex/cv/transforms/operators.py +0 -1402
- paddlex/det.py +0 -43
- paddlex/paddleseg/__init__.py +0 -17
- paddlex/paddleseg/core/__init__.py +0 -20
- paddlex/paddleseg/core/infer.py +0 -289
- paddlex/paddleseg/core/predict.py +0 -145
- paddlex/paddleseg/core/train.py +0 -258
- paddlex/paddleseg/core/val.py +0 -172
- paddlex/paddleseg/cvlibs/__init__.py +0 -17
- paddlex/paddleseg/cvlibs/callbacks.py +0 -279
- paddlex/paddleseg/cvlibs/config.py +0 -359
- paddlex/paddleseg/cvlibs/manager.py +0 -142
- paddlex/paddleseg/cvlibs/param_init.py +0 -91
- paddlex/paddleseg/datasets/__init__.py +0 -21
- paddlex/paddleseg/datasets/ade.py +0 -112
- paddlex/paddleseg/datasets/cityscapes.py +0 -86
- paddlex/paddleseg/datasets/cocostuff.py +0 -79
- paddlex/paddleseg/datasets/dataset.py +0 -164
- paddlex/paddleseg/datasets/mini_deep_globe_road_extraction.py +0 -95
- paddlex/paddleseg/datasets/optic_disc_seg.py +0 -97
- paddlex/paddleseg/datasets/pascal_context.py +0 -80
- paddlex/paddleseg/datasets/voc.py +0 -113
- paddlex/paddleseg/models/__init__.py +0 -39
- paddlex/paddleseg/models/ann.py +0 -436
- paddlex/paddleseg/models/attention_unet.py +0 -189
- paddlex/paddleseg/models/backbones/__init__.py +0 -18
- paddlex/paddleseg/models/backbones/hrnet.py +0 -815
- paddlex/paddleseg/models/backbones/mobilenetv3.py +0 -365
- paddlex/paddleseg/models/backbones/resnet_vd.py +0 -364
- paddlex/paddleseg/models/backbones/xception_deeplab.py +0 -415
- paddlex/paddleseg/models/bisenet.py +0 -311
- paddlex/paddleseg/models/danet.py +0 -220
- paddlex/paddleseg/models/decoupled_segnet.py +0 -233
- paddlex/paddleseg/models/deeplab.py +0 -258
- paddlex/paddleseg/models/dnlnet.py +0 -231
- paddlex/paddleseg/models/emanet.py +0 -219
- paddlex/paddleseg/models/fast_scnn.py +0 -318
- paddlex/paddleseg/models/fcn.py +0 -135
- paddlex/paddleseg/models/gcnet.py +0 -223
- paddlex/paddleseg/models/gscnn.py +0 -357
- paddlex/paddleseg/models/hardnet.py +0 -309
- paddlex/paddleseg/models/isanet.py +0 -202
- paddlex/paddleseg/models/layers/__init__.py +0 -19
- paddlex/paddleseg/models/layers/activation.py +0 -73
- paddlex/paddleseg/models/layers/attention.py +0 -146
- paddlex/paddleseg/models/layers/layer_libs.py +0 -168
- paddlex/paddleseg/models/layers/nonlocal2d.py +0 -155
- paddlex/paddleseg/models/layers/pyramid_pool.py +0 -182
- paddlex/paddleseg/models/losses/__init__.py +0 -27
- paddlex/paddleseg/models/losses/binary_cross_entropy_loss.py +0 -174
- paddlex/paddleseg/models/losses/bootstrapped_cross_entropy.py +0 -73
- paddlex/paddleseg/models/losses/cross_entropy_loss.py +0 -94
- paddlex/paddleseg/models/losses/decoupledsegnet_relax_boundary_loss.py +0 -129
- paddlex/paddleseg/models/losses/dice_loss.py +0 -61
- paddlex/paddleseg/models/losses/edge_attention_loss.py +0 -78
- paddlex/paddleseg/models/losses/gscnn_dual_task_loss.py +0 -141
- paddlex/paddleseg/models/losses/l1_loss.py +0 -76
- paddlex/paddleseg/models/losses/lovasz_loss.py +0 -222
- paddlex/paddleseg/models/losses/mean_square_error_loss.py +0 -65
- paddlex/paddleseg/models/losses/mixed_loss.py +0 -58
- paddlex/paddleseg/models/losses/ohem_cross_entropy_loss.py +0 -99
- paddlex/paddleseg/models/losses/ohem_edge_attention_loss.py +0 -114
- paddlex/paddleseg/models/ocrnet.py +0 -248
- paddlex/paddleseg/models/pspnet.py +0 -147
- paddlex/paddleseg/models/sfnet.py +0 -236
- paddlex/paddleseg/models/shufflenet_slim.py +0 -268
- paddlex/paddleseg/models/u2net.py +0 -574
- paddlex/paddleseg/models/unet.py +0 -155
- paddlex/paddleseg/models/unet_3plus.py +0 -316
- paddlex/paddleseg/models/unet_plusplus.py +0 -237
- paddlex/paddleseg/transforms/__init__.py +0 -16
- paddlex/paddleseg/transforms/functional.py +0 -161
- paddlex/paddleseg/transforms/transforms.py +0 -937
- paddlex/paddleseg/utils/__init__.py +0 -22
- paddlex/paddleseg/utils/config_check.py +0 -60
- paddlex/paddleseg/utils/download.py +0 -163
- paddlex/paddleseg/utils/env/__init__.py +0 -16
- paddlex/paddleseg/utils/env/seg_env.py +0 -56
- paddlex/paddleseg/utils/env/sys_env.py +0 -122
- paddlex/paddleseg/utils/logger.py +0 -48
- paddlex/paddleseg/utils/metrics.py +0 -146
- paddlex/paddleseg/utils/progbar.py +0 -212
- paddlex/paddleseg/utils/timer.py +0 -53
- paddlex/paddleseg/utils/utils.py +0 -120
- paddlex/paddleseg/utils/visualize.py +0 -90
- paddlex/ppcls/__init__.py +0 -20
- paddlex/ppcls/data/__init__.py +0 -15
- paddlex/ppcls/data/imaug/__init__.py +0 -94
- paddlex/ppcls/data/imaug/autoaugment.py +0 -264
- paddlex/ppcls/data/imaug/batch_operators.py +0 -117
- paddlex/ppcls/data/imaug/cutout.py +0 -41
- paddlex/ppcls/data/imaug/fmix.py +0 -217
- paddlex/ppcls/data/imaug/grid.py +0 -89
- paddlex/ppcls/data/imaug/hide_and_seek.py +0 -44
- paddlex/ppcls/data/imaug/operators.py +0 -256
- paddlex/ppcls/data/imaug/randaugment.py +0 -106
- paddlex/ppcls/data/imaug/random_erasing.py +0 -55
- paddlex/ppcls/data/reader.py +0 -318
- paddlex/ppcls/modeling/__init__.py +0 -20
- paddlex/ppcls/modeling/architectures/__init__.py +0 -51
- paddlex/ppcls/modeling/architectures/alexnet.py +0 -132
- paddlex/ppcls/modeling/architectures/darknet.py +0 -161
- paddlex/ppcls/modeling/architectures/densenet.py +0 -308
- paddlex/ppcls/modeling/architectures/distillation_models.py +0 -65
- paddlex/ppcls/modeling/architectures/distilled_vision_transformer.py +0 -196
- paddlex/ppcls/modeling/architectures/dpn.py +0 -425
- paddlex/ppcls/modeling/architectures/efficientnet.py +0 -901
- paddlex/ppcls/modeling/architectures/ghostnet.py +0 -331
- paddlex/ppcls/modeling/architectures/googlenet.py +0 -207
- paddlex/ppcls/modeling/architectures/hrnet.py +0 -742
- paddlex/ppcls/modeling/architectures/inception_v3.py +0 -541
- paddlex/ppcls/modeling/architectures/inception_v4.py +0 -455
- paddlex/ppcls/modeling/architectures/mixnet.py +0 -782
- paddlex/ppcls/modeling/architectures/mobilenet_v1.py +0 -266
- paddlex/ppcls/modeling/architectures/mobilenet_v2.py +0 -248
- paddlex/ppcls/modeling/architectures/mobilenet_v3.py +0 -359
- paddlex/ppcls/modeling/architectures/regnet.py +0 -383
- paddlex/ppcls/modeling/architectures/repvgg.py +0 -339
- paddlex/ppcls/modeling/architectures/res2net.py +0 -272
- paddlex/ppcls/modeling/architectures/res2net_vd.py +0 -295
- paddlex/ppcls/modeling/architectures/resnest.py +0 -705
- paddlex/ppcls/modeling/architectures/resnet.py +0 -317
- paddlex/ppcls/modeling/architectures/resnet_vc.py +0 -309
- paddlex/ppcls/modeling/architectures/resnet_vd.py +0 -354
- paddlex/ppcls/modeling/architectures/resnext.py +0 -259
- paddlex/ppcls/modeling/architectures/resnext101_wsl.py +0 -447
- paddlex/ppcls/modeling/architectures/resnext_vd.py +0 -266
- paddlex/ppcls/modeling/architectures/rexnet.py +0 -240
- paddlex/ppcls/modeling/architectures/se_resnet_vd.py +0 -378
- paddlex/ppcls/modeling/architectures/se_resnext.py +0 -290
- paddlex/ppcls/modeling/architectures/se_resnext_vd.py +0 -285
- paddlex/ppcls/modeling/architectures/shufflenet_v2.py +0 -320
- paddlex/ppcls/modeling/architectures/squeezenet.py +0 -154
- paddlex/ppcls/modeling/architectures/vgg.py +0 -152
- paddlex/ppcls/modeling/architectures/vision_transformer.py +0 -402
- paddlex/ppcls/modeling/architectures/xception.py +0 -345
- paddlex/ppcls/modeling/architectures/xception_deeplab.py +0 -386
- paddlex/ppcls/modeling/loss.py +0 -158
- paddlex/ppcls/modeling/utils.py +0 -53
- paddlex/ppcls/optimizer/__init__.py +0 -19
- paddlex/ppcls/optimizer/learning_rate.py +0 -159
- paddlex/ppcls/optimizer/optimizer.py +0 -165
- paddlex/ppcls/utils/__init__.py +0 -27
- paddlex/ppcls/utils/check.py +0 -151
- paddlex/ppcls/utils/config.py +0 -201
- paddlex/ppcls/utils/logger.py +0 -120
- paddlex/ppcls/utils/metrics.py +0 -112
- paddlex/ppcls/utils/misc.py +0 -62
- paddlex/ppcls/utils/model_zoo.py +0 -213
- paddlex/ppcls/utils/save_load.py +0 -163
- paddlex/ppdet/__init__.py +0 -16
- paddlex/ppdet/core/__init__.py +0 -15
- paddlex/ppdet/core/config/__init__.py +0 -13
- paddlex/ppdet/core/config/schema.py +0 -248
- paddlex/ppdet/core/config/yaml_helpers.py +0 -118
- paddlex/ppdet/core/workspace.py +0 -279
- paddlex/ppdet/data/__init__.py +0 -21
- paddlex/ppdet/data/reader.py +0 -304
- paddlex/ppdet/data/shm_utils.py +0 -67
- paddlex/ppdet/data/source/__init__.py +0 -27
- paddlex/ppdet/data/source/category.py +0 -823
- paddlex/ppdet/data/source/coco.py +0 -243
- paddlex/ppdet/data/source/dataset.py +0 -192
- paddlex/ppdet/data/source/keypoint_coco.py +0 -656
- paddlex/ppdet/data/source/mot.py +0 -360
- paddlex/ppdet/data/source/voc.py +0 -204
- paddlex/ppdet/data/source/widerface.py +0 -180
- paddlex/ppdet/data/transform/__init__.py +0 -28
- paddlex/ppdet/data/transform/autoaugment_utils.py +0 -1593
- paddlex/ppdet/data/transform/batch_operators.py +0 -758
- paddlex/ppdet/data/transform/gridmask_utils.py +0 -83
- paddlex/ppdet/data/transform/keypoint_operators.py +0 -665
- paddlex/ppdet/data/transform/mot_operators.py +0 -636
- paddlex/ppdet/data/transform/op_helper.py +0 -468
- paddlex/ppdet/data/transform/operators.py +0 -2103
- paddlex/ppdet/engine/__init__.py +0 -29
- paddlex/ppdet/engine/callbacks.py +0 -262
- paddlex/ppdet/engine/env.py +0 -47
- paddlex/ppdet/engine/export_utils.py +0 -118
- paddlex/ppdet/engine/tracker.py +0 -425
- paddlex/ppdet/engine/trainer.py +0 -535
- paddlex/ppdet/metrics/__init__.py +0 -23
- paddlex/ppdet/metrics/coco_utils.py +0 -184
- paddlex/ppdet/metrics/json_results.py +0 -151
- paddlex/ppdet/metrics/keypoint_metrics.py +0 -202
- paddlex/ppdet/metrics/map_utils.py +0 -396
- paddlex/ppdet/metrics/metrics.py +0 -300
- paddlex/ppdet/metrics/mot_eval_utils.py +0 -192
- paddlex/ppdet/metrics/mot_metrics.py +0 -184
- paddlex/ppdet/metrics/widerface_utils.py +0 -393
- paddlex/ppdet/model_zoo/__init__.py +0 -18
- paddlex/ppdet/model_zoo/model_zoo.py +0 -86
- paddlex/ppdet/model_zoo/tests/__init__.py +0 -13
- paddlex/ppdet/model_zoo/tests/test_get_model.py +0 -48
- paddlex/ppdet/model_zoo/tests/test_list_model.py +0 -68
- paddlex/ppdet/modeling/__init__.py +0 -41
- paddlex/ppdet/modeling/architectures/__init__.py +0 -40
- paddlex/ppdet/modeling/architectures/cascade_rcnn.py +0 -144
- paddlex/ppdet/modeling/architectures/centernet.py +0 -103
- paddlex/ppdet/modeling/architectures/deepsort.py +0 -111
- paddlex/ppdet/modeling/architectures/fairmot.py +0 -107
- paddlex/ppdet/modeling/architectures/faster_rcnn.py +0 -106
- paddlex/ppdet/modeling/architectures/fcos.py +0 -105
- paddlex/ppdet/modeling/architectures/jde.py +0 -125
- paddlex/ppdet/modeling/architectures/keypoint_hrhrnet.py +0 -286
- paddlex/ppdet/modeling/architectures/keypoint_hrnet.py +0 -203
- paddlex/ppdet/modeling/architectures/mask_rcnn.py +0 -135
- paddlex/ppdet/modeling/architectures/meta_arch.py +0 -45
- paddlex/ppdet/modeling/architectures/s2anet.py +0 -103
- paddlex/ppdet/modeling/architectures/solov2.py +0 -110
- paddlex/ppdet/modeling/architectures/ssd.py +0 -84
- paddlex/ppdet/modeling/architectures/ttfnet.py +0 -98
- paddlex/ppdet/modeling/architectures/yolo.py +0 -104
- paddlex/ppdet/modeling/backbones/__init__.py +0 -37
- paddlex/ppdet/modeling/backbones/blazenet.py +0 -322
- paddlex/ppdet/modeling/backbones/darknet.py +0 -341
- paddlex/ppdet/modeling/backbones/dla.py +0 -244
- paddlex/ppdet/modeling/backbones/ghostnet.py +0 -476
- paddlex/ppdet/modeling/backbones/hrnet.py +0 -724
- paddlex/ppdet/modeling/backbones/mobilenet_v1.py +0 -410
- paddlex/ppdet/modeling/backbones/mobilenet_v3.py +0 -497
- paddlex/ppdet/modeling/backbones/name_adapter.py +0 -69
- paddlex/ppdet/modeling/backbones/res2net.py +0 -358
- paddlex/ppdet/modeling/backbones/resnet.py +0 -606
- paddlex/ppdet/modeling/backbones/senet.py +0 -140
- paddlex/ppdet/modeling/backbones/vgg.py +0 -216
- paddlex/ppdet/modeling/bbox_utils.py +0 -464
- paddlex/ppdet/modeling/heads/__init__.py +0 -41
- paddlex/ppdet/modeling/heads/bbox_head.py +0 -379
- paddlex/ppdet/modeling/heads/cascade_head.py +0 -285
- paddlex/ppdet/modeling/heads/centernet_head.py +0 -194
- paddlex/ppdet/modeling/heads/face_head.py +0 -113
- paddlex/ppdet/modeling/heads/fcos_head.py +0 -270
- paddlex/ppdet/modeling/heads/keypoint_hrhrnet_head.py +0 -108
- paddlex/ppdet/modeling/heads/mask_head.py +0 -253
- paddlex/ppdet/modeling/heads/roi_extractor.py +0 -111
- paddlex/ppdet/modeling/heads/s2anet_head.py +0 -845
- paddlex/ppdet/modeling/heads/solov2_head.py +0 -537
- paddlex/ppdet/modeling/heads/ssd_head.py +0 -175
- paddlex/ppdet/modeling/heads/ttf_head.py +0 -314
- paddlex/ppdet/modeling/heads/yolo_head.py +0 -124
- paddlex/ppdet/modeling/keypoint_utils.py +0 -302
- paddlex/ppdet/modeling/layers.py +0 -1142
- paddlex/ppdet/modeling/losses/__init__.py +0 -35
- paddlex/ppdet/modeling/losses/ctfocal_loss.py +0 -67
- paddlex/ppdet/modeling/losses/fairmot_loss.py +0 -41
- paddlex/ppdet/modeling/losses/fcos_loss.py +0 -225
- paddlex/ppdet/modeling/losses/iou_aware_loss.py +0 -48
- paddlex/ppdet/modeling/losses/iou_loss.py +0 -210
- paddlex/ppdet/modeling/losses/jde_loss.py +0 -182
- paddlex/ppdet/modeling/losses/keypoint_loss.py +0 -228
- paddlex/ppdet/modeling/losses/solov2_loss.py +0 -101
- paddlex/ppdet/modeling/losses/ssd_loss.py +0 -163
- paddlex/ppdet/modeling/losses/yolo_loss.py +0 -212
- paddlex/ppdet/modeling/mot/__init__.py +0 -25
- paddlex/ppdet/modeling/mot/matching/__init__.py +0 -19
- paddlex/ppdet/modeling/mot/matching/deepsort_matching.py +0 -382
- paddlex/ppdet/modeling/mot/matching/jde_matching.py +0 -145
- paddlex/ppdet/modeling/mot/motion/__init__.py +0 -17
- paddlex/ppdet/modeling/mot/motion/kalman_filter.py +0 -270
- paddlex/ppdet/modeling/mot/tracker/__init__.py +0 -23
- paddlex/ppdet/modeling/mot/tracker/base_jde_tracker.py +0 -267
- paddlex/ppdet/modeling/mot/tracker/base_sde_tracker.py +0 -145
- paddlex/ppdet/modeling/mot/tracker/deepsort_tracker.py +0 -165
- paddlex/ppdet/modeling/mot/tracker/jde_tracker.py +0 -262
- paddlex/ppdet/modeling/mot/utils.py +0 -181
- paddlex/ppdet/modeling/mot/visualization.py +0 -130
- paddlex/ppdet/modeling/necks/__init__.py +0 -25
- paddlex/ppdet/modeling/necks/centernet_fpn.py +0 -185
- paddlex/ppdet/modeling/necks/fpn.py +0 -233
- paddlex/ppdet/modeling/necks/hrfpn.py +0 -131
- paddlex/ppdet/modeling/necks/ttf_fpn.py +0 -243
- paddlex/ppdet/modeling/necks/yolo_fpn.py +0 -1034
- paddlex/ppdet/modeling/ops.py +0 -1599
- paddlex/ppdet/modeling/post_process.py +0 -449
- paddlex/ppdet/modeling/proposal_generator/__init__.py +0 -2
- paddlex/ppdet/modeling/proposal_generator/anchor_generator.py +0 -135
- paddlex/ppdet/modeling/proposal_generator/proposal_generator.py +0 -81
- paddlex/ppdet/modeling/proposal_generator/rpn_head.py +0 -269
- paddlex/ppdet/modeling/proposal_generator/target.py +0 -671
- paddlex/ppdet/modeling/proposal_generator/target_layer.py +0 -476
- paddlex/ppdet/modeling/reid/__init__.py +0 -23
- paddlex/ppdet/modeling/reid/fairmot_embedding_head.py +0 -117
- paddlex/ppdet/modeling/reid/jde_embedding_head.py +0 -189
- paddlex/ppdet/modeling/reid/pyramidal_embedding.py +0 -151
- paddlex/ppdet/modeling/reid/resnet.py +0 -320
- paddlex/ppdet/modeling/shape_spec.py +0 -33
- paddlex/ppdet/modeling/tests/__init__.py +0 -13
- paddlex/ppdet/modeling/tests/test_architectures.py +0 -59
- paddlex/ppdet/modeling/tests/test_base.py +0 -75
- paddlex/ppdet/modeling/tests/test_ops.py +0 -839
- paddlex/ppdet/modeling/tests/test_yolov3_loss.py +0 -420
- paddlex/ppdet/optimizer.py +0 -285
- paddlex/ppdet/slim/__init__.py +0 -62
- paddlex/ppdet/slim/distill.py +0 -111
- paddlex/ppdet/slim/prune.py +0 -85
- paddlex/ppdet/slim/quant.py +0 -52
- paddlex/ppdet/utils/__init__.py +0 -13
- paddlex/ppdet/utils/check.py +0 -93
- paddlex/ppdet/utils/checkpoint.py +0 -216
- paddlex/ppdet/utils/cli.py +0 -151
- paddlex/ppdet/utils/colormap.py +0 -56
- paddlex/ppdet/utils/download.py +0 -477
- paddlex/ppdet/utils/logger.py +0 -71
- paddlex/ppdet/utils/stats.py +0 -95
- paddlex/ppdet/utils/visualizer.py +0 -292
- paddlex/ppdet/utils/voc_utils.py +0 -87
- paddlex/seg.py +0 -38
- paddlex/tools/__init__.py +0 -16
- paddlex/tools/convert.py +0 -52
- paddlex/tools/dataset_conversion/__init__.py +0 -24
- paddlex/tools/dataset_conversion/x2coco.py +0 -379
- paddlex/tools/dataset_conversion/x2imagenet.py +0 -82
- paddlex/tools/dataset_conversion/x2seg.py +0 -343
- paddlex/tools/dataset_conversion/x2voc.py +0 -230
- paddlex/tools/dataset_split/__init__.py +0 -23
- paddlex/tools/dataset_split/coco_split.py +0 -69
- paddlex/tools/dataset_split/imagenet_split.py +0 -75
- paddlex/tools/dataset_split/seg_split.py +0 -96
- paddlex/tools/dataset_split/utils.py +0 -75
- paddlex/tools/dataset_split/voc_split.py +0 -91
- paddlex/tools/split.py +0 -41
- paddlex/utils/checkpoint.py +0 -439
- paddlex/utils/shm.py +0 -67
- paddlex/utils/stats.py +0 -68
- paddlex/utils/utils.py +0 -140
- paddlex-2.0.0rc4.dist-info/LICENSE +0 -201
- paddlex-2.0.0rc4.dist-info/METADATA +0 -29
- paddlex-2.0.0rc4.dist-info/RECORD +0 -445
- paddlex-2.0.0rc4.dist-info/WHEEL +0 -5
- paddlex-2.0.0rc4.dist-info/entry_points.txt +0 -3
- paddlex-2.0.0rc4.dist-info/top_level.txt +0 -2
@@ -0,0 +1,2149 @@
|
|
1
|
+
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
import bisect
|
16
|
+
import functools
|
17
|
+
import inspect
|
18
|
+
import io
|
19
|
+
import itertools
|
20
|
+
import json
|
21
|
+
import os
|
22
|
+
import re
|
23
|
+
import unicodedata
|
24
|
+
from collections import OrderedDict
|
25
|
+
from dataclasses import asdict, dataclass
|
26
|
+
from functools import lru_cache
|
27
|
+
from typing import Any, Dict, List, Literal, Optional, Tuple, Union
|
28
|
+
|
29
|
+
import numpy as np
|
30
|
+
|
31
|
+
from .....utils import logging
|
32
|
+
from .....utils.deps import class_requires_deps, is_dep_available
|
33
|
+
from .tokenizer_utils_base import (
|
34
|
+
CHAT_TEMPLATE_CONFIG_NAME,
|
35
|
+
AddedToken,
|
36
|
+
BatchEncoding,
|
37
|
+
EncodedInput,
|
38
|
+
EncodedInputPair,
|
39
|
+
PaddingStrategy,
|
40
|
+
PreTokenizedInput,
|
41
|
+
PreTokenizedInputPair,
|
42
|
+
PretrainedTokenizerBase,
|
43
|
+
TensorType,
|
44
|
+
TextInput,
|
45
|
+
TextInputPair,
|
46
|
+
TruncationStrategy,
|
47
|
+
)
|
48
|
+
from .utils import convert_to_dict_message, fn_args_to_dict
|
49
|
+
from .vocab import Vocab
|
50
|
+
|
51
|
+
if is_dep_available("Jinja2"):
|
52
|
+
from jinja2 import Template
|
53
|
+
from jinja2.exceptions import TemplateError, TemplateSyntaxError
|
54
|
+
from jinja2.sandbox import ImmutableSandboxedEnvironment
|
55
|
+
|
56
|
+
__all__ = [
|
57
|
+
"ChatTemplate",
|
58
|
+
"Trie",
|
59
|
+
"ChatTemplateMixin",
|
60
|
+
"PretrainedTokenizer",
|
61
|
+
"InitTrackerMeta",
|
62
|
+
]
|
63
|
+
|
64
|
+
|
65
|
+
@class_requires_deps("Jinja2")
|
66
|
+
@dataclass
|
67
|
+
class ChatTemplate:
|
68
|
+
conversation: Union[List[str], None] = None
|
69
|
+
system: Union[str, None] = None
|
70
|
+
query: str = None
|
71
|
+
|
72
|
+
@staticmethod
|
73
|
+
@lru_cache()
|
74
|
+
def _compile_jinja_template(chat_template) -> "Template":
|
75
|
+
def raise_exception(message):
|
76
|
+
raise TemplateError(message)
|
77
|
+
|
78
|
+
jinja_env = ImmutableSandboxedEnvironment(
|
79
|
+
trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=True
|
80
|
+
)
|
81
|
+
jinja_env.globals["raise_exception"] = raise_exception
|
82
|
+
return jinja_env.from_string(chat_template)
|
83
|
+
|
84
|
+
def render_conversation(
|
85
|
+
self,
|
86
|
+
conversation_data: Union[List[str], Dict[str, str]],
|
87
|
+
index: int = 0,
|
88
|
+
context_data: Dict[str, Any] = {},
|
89
|
+
) -> List[str]:
|
90
|
+
"""
|
91
|
+
Args:
|
92
|
+
conversation_data (list[str]): the conversation data which must be two parts
|
93
|
+
index (int): the index of current conversation
|
94
|
+
|
95
|
+
Returns:
|
96
|
+
list[str]: the rendered conversation data
|
97
|
+
"""
|
98
|
+
if self.conversation is None:
|
99
|
+
raise ValueError(
|
100
|
+
"The template for multi-turns is invalid, please check `conversation` filed in your chat-template."
|
101
|
+
)
|
102
|
+
|
103
|
+
if isinstance(conversation_data, (list, tuple)):
|
104
|
+
assert (
|
105
|
+
len(conversation_data) == 2
|
106
|
+
), "Each round/turn of conversation must be two participants, eg: [user-query, bot-query]"
|
107
|
+
|
108
|
+
conversation_data = {
|
109
|
+
"user": conversation_data[0],
|
110
|
+
"bot": conversation_data[1],
|
111
|
+
"index": index,
|
112
|
+
}
|
113
|
+
conversation_data.update(context_data)
|
114
|
+
|
115
|
+
one_turn_conversation = []
|
116
|
+
for conversation in self.conversation:
|
117
|
+
template = self._compile_jinja_template(conversation)
|
118
|
+
result = template.render(conversation_data)
|
119
|
+
one_turn_conversation.append(result)
|
120
|
+
return one_turn_conversation
|
121
|
+
|
122
|
+
def render_query(
|
123
|
+
self, query: str, index: int = 0, context_data: Dict[str, Union[int, str]] = {}
|
124
|
+
):
|
125
|
+
if self.query is None:
|
126
|
+
return query
|
127
|
+
|
128
|
+
template = self._compile_jinja_template(self.query)
|
129
|
+
return template.render(query=query, index=index, **context_data)
|
130
|
+
|
131
|
+
def _init_context_data(
|
132
|
+
self, context_data: Dict[str, Union[int, str]] = {}
|
133
|
+
) -> Dict[str, Union[int, str]]:
|
134
|
+
"""init the context data for chat-template"""
|
135
|
+
context_data["is_training"] = context_data.get("is_training", False)
|
136
|
+
return context_data
|
137
|
+
|
138
|
+
def render_system(self, context_data: Dict[str, Union[int, str]] = {}) -> str:
|
139
|
+
if self.system is None:
|
140
|
+
return ""
|
141
|
+
|
142
|
+
template = self._compile_jinja_template(self.system)
|
143
|
+
return template.render(**context_data)
|
144
|
+
|
145
|
+
def __call__(
|
146
|
+
self,
|
147
|
+
conversations: Union[List[List[str]], str],
|
148
|
+
context_data: Dict[str, Union[int, str]] = {},
|
149
|
+
) -> str:
|
150
|
+
"""render the conversations by chat-template
|
151
|
+
|
152
|
+
Args:
|
153
|
+
conversations (list[list[str]]): the conversations of use and bot
|
154
|
+
|
155
|
+
Returns:
|
156
|
+
str: the result of conversation
|
157
|
+
"""
|
158
|
+
if isinstance(conversations, str):
|
159
|
+
conversations = [[conversations]]
|
160
|
+
|
161
|
+
# [1 ... n-1] conversation
|
162
|
+
final_query = self.render_system(context_data=context_data)
|
163
|
+
context_data["length"] = len(conversations)
|
164
|
+
for index, conversation in enumerate(conversations[:-1]):
|
165
|
+
context_data["is_first"] = index == 0
|
166
|
+
context_data["is_last"] = False
|
167
|
+
final_query += "".join(
|
168
|
+
self.render_conversation(
|
169
|
+
conversation, index=index, context_data=context_data
|
170
|
+
)
|
171
|
+
)
|
172
|
+
|
173
|
+
if not isinstance(conversations[-1], list) and not len(conversations[-1]) != 1:
|
174
|
+
raise ValueError(
|
175
|
+
"The length of last conversation must be one, eg: [[user-query, bot-answer], [user-query, bot-answer], ..., [user-query]]"
|
176
|
+
)
|
177
|
+
if len(conversations[-1]) > 1:
|
178
|
+
logging.warning(
|
179
|
+
f"The last conversation is not a single-round, chat-template will skip the conversation: {conversations[-1][1:]}"
|
180
|
+
)
|
181
|
+
|
182
|
+
final_query += self.render_query(
|
183
|
+
conversations[-1][0],
|
184
|
+
index=len(conversations) - 1,
|
185
|
+
context_data=context_data,
|
186
|
+
)
|
187
|
+
return final_query
|
188
|
+
|
189
|
+
@classmethod
|
190
|
+
def from_dict(cls, config: Dict):
|
191
|
+
return cls(**config)
|
192
|
+
|
193
|
+
@classmethod
|
194
|
+
def from_file(cls, file: str):
|
195
|
+
with open(file, "r", encoding="utf-8") as f:
|
196
|
+
config = json.load(f)
|
197
|
+
return cls.from_dict(config)
|
198
|
+
|
199
|
+
|
200
|
+
def adapt_stale_fwd_patch(self, name, value):
|
201
|
+
"""
|
202
|
+
Since there are some monkey patches for forward of PretrainedModel, such as
|
203
|
+
model compression, we make these patches compatible with the latest forward
|
204
|
+
method.
|
205
|
+
"""
|
206
|
+
|
207
|
+
if name == "forward":
|
208
|
+
# NOTE(guosheng): In dygraph to static, `layer.forward` would be patched
|
209
|
+
# by an instance of `StaticFunction`. And use string compare to avoid to
|
210
|
+
# import fluid.
|
211
|
+
if type(value).__name__.endswith(
|
212
|
+
"StaticFunction"
|
213
|
+
) or self.forward.__class__.__name__.endswith("StaticFunction"):
|
214
|
+
return value
|
215
|
+
(
|
216
|
+
patch_spec_args,
|
217
|
+
patch_spec_varargs,
|
218
|
+
patch_spec_varkw,
|
219
|
+
patch_spec_defaults,
|
220
|
+
_,
|
221
|
+
_,
|
222
|
+
_,
|
223
|
+
) = inspect.getfullargspec(value)
|
224
|
+
(spec_args, spec_varargs, spec_varkw, spec_defaults, _, _, _) = (
|
225
|
+
inspect.getfullargspec(self.forward)
|
226
|
+
)
|
227
|
+
new_args = [
|
228
|
+
arg
|
229
|
+
for arg in ("output_hidden_states", "output_attentions", "return_dict")
|
230
|
+
if arg not in patch_spec_args and arg in spec_args
|
231
|
+
]
|
232
|
+
|
233
|
+
if new_args:
|
234
|
+
import paddle
|
235
|
+
|
236
|
+
if self.__module__.startswith("paddlenlp"):
|
237
|
+
logging.warning(
|
238
|
+
f"The `forward` method of {self.__class__ if isinstance(self, paddle.nn.Layer) else self} is patched and the patch "
|
239
|
+
"might be based on an old oversion which missing some "
|
240
|
+
f"arguments compared with the latest, such as {new_args}. "
|
241
|
+
"We automatically add compatibility on the patch for "
|
242
|
+
"these arguments, and maybe the patch should be updated."
|
243
|
+
)
|
244
|
+
else:
|
245
|
+
logging.warning(
|
246
|
+
f"The `forward` method of {self.__class__ if isinstance(self, paddle.nn.Layer) else self} "
|
247
|
+
"is patched and the patch might be conflict with patches made "
|
248
|
+
f"by paddlenlp which seems have more arguments such as {new_args}. "
|
249
|
+
"We automatically add compatibility on the patch for "
|
250
|
+
"these arguments, and maybe the patch should be updated."
|
251
|
+
)
|
252
|
+
if isinstance(self, paddle.nn.Layer) and inspect.isfunction(value):
|
253
|
+
|
254
|
+
@functools.wraps(value)
|
255
|
+
def wrap_fwd(*args, **kwargs):
|
256
|
+
for arg in new_args:
|
257
|
+
kwargs.pop(arg, None)
|
258
|
+
return value(self, *args, **kwargs)
|
259
|
+
|
260
|
+
else:
|
261
|
+
|
262
|
+
@functools.wraps(value)
|
263
|
+
def wrap_fwd(*args, **kwargs):
|
264
|
+
for arg in new_args:
|
265
|
+
kwargs.pop(arg, None)
|
266
|
+
return value(*args, **kwargs)
|
267
|
+
|
268
|
+
return wrap_fwd
|
269
|
+
return value
|
270
|
+
|
271
|
+
|
272
|
+
# NOTE:
|
273
|
+
# Modification:
|
274
|
+
# class InitTrackerMeta(type(paddle.nn.Layer)) -> class InitTrackerMeta(type)
|
275
|
+
# Context:
|
276
|
+
# 1. In paddle 3.0rc, type(paddle.nn.Layer) == type
|
277
|
+
# 2. Solve the conflict between ultra-infer and paddle
|
278
|
+
class InitTrackerMeta(type):
|
279
|
+
"""
|
280
|
+
This metaclass wraps the `__init__` method of a class to add `init_config`
|
281
|
+
attribute for instances of that class, and `init_config` use a dict to track
|
282
|
+
the initial configuration. If the class has `_pre_init` or `_post_init`
|
283
|
+
method, it would be hooked before or after `__init__` and called as
|
284
|
+
`_pre_init(self, init_fn, init_args)` or `_post_init(self, init_fn, init_args)`.
|
285
|
+
Since InitTrackerMeta would be used as metaclass for pretrained model classes,
|
286
|
+
which always are Layer and `type(Layer)` is not `type`, thus use `type(Layer)`
|
287
|
+
rather than `type` as base class for it to avoid inheritance metaclass
|
288
|
+
conflicts.
|
289
|
+
"""
|
290
|
+
|
291
|
+
def __init__(cls, name, bases, attrs):
|
292
|
+
init_func = cls.__init__
|
293
|
+
# If attrs has `__init__`, wrap it using accessible `_pre_init, _post_init`.
|
294
|
+
# Otherwise, no need to wrap again since the super cls has been wrapped.
|
295
|
+
# TODO: remove reduplicated tracker if using super cls `__init__`
|
296
|
+
pre_init_func = getattr(cls, "_pre_init", None) if "__init__" in attrs else None
|
297
|
+
post_init_func = (
|
298
|
+
getattr(cls, "_post_init", None) if "__init__" in attrs else None
|
299
|
+
)
|
300
|
+
cls.__init__ = InitTrackerMeta.init_and_track_conf(
|
301
|
+
init_func, pre_init_func, post_init_func
|
302
|
+
)
|
303
|
+
super(InitTrackerMeta, cls).__init__(name, bases, attrs)
|
304
|
+
|
305
|
+
@staticmethod
|
306
|
+
def init_and_track_conf(init_func, pre_init_func=None, post_init_func=None):
|
307
|
+
"""
|
308
|
+
wraps `init_func` which is `__init__` method of a class to add `init_config`
|
309
|
+
attribute for instances of that class.
|
310
|
+
Args:
|
311
|
+
init_func (callable): It should be the `__init__` method of a class.
|
312
|
+
warning: `self` always is the class type of down-stream model, eg: BertForTokenClassification
|
313
|
+
pre_init_func (callable, optional): If provided, it would be hooked after
|
314
|
+
`init_func` and called as `pre_init_func(self, init_func, *init_args, **init_args)`.
|
315
|
+
Default None.
|
316
|
+
post_init_func (callable, optional): If provided, it would be hooked after
|
317
|
+
`init_func` and called as `post_init_func(self, init_func, *init_args, **init_args)`.
|
318
|
+
Default None.
|
319
|
+
|
320
|
+
Returns:
|
321
|
+
function: the wrapped function
|
322
|
+
"""
|
323
|
+
|
324
|
+
@functools.wraps(init_func)
|
325
|
+
def __impl__(self, *args, **kwargs):
|
326
|
+
# registered helper by `pre_init_func`
|
327
|
+
if pre_init_func:
|
328
|
+
pre_init_func(self, init_func, *args, **kwargs)
|
329
|
+
# keep full configuration
|
330
|
+
init_func(self, *args, **kwargs)
|
331
|
+
# registered helper by `post_init_func`
|
332
|
+
if post_init_func:
|
333
|
+
post_init_func(self, init_func, *args, **kwargs)
|
334
|
+
self.init_config = kwargs
|
335
|
+
if args:
|
336
|
+
kwargs["init_args"] = args
|
337
|
+
kwargs["init_class"] = self.__class__.__name__
|
338
|
+
|
339
|
+
return __impl__
|
340
|
+
|
341
|
+
def __setattr__(self, name, value):
|
342
|
+
value = adapt_stale_fwd_patch(self, name, value)
|
343
|
+
return super(InitTrackerMeta, self).__setattr__(name, value)
|
344
|
+
|
345
|
+
|
346
|
+
class Trie:
    """
    Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass.
    Loose reference: https://en.wikipedia.org/wiki/Trie
    """

    def __init__(self):
        self.data = {}

    def add(self, word: str):
        """
        Passes over every char (utf-8 char) of `word` and recursively adds it to the internal `data` trie representation.
        The special key `""` is used to represent termination.

        This function is idempotent: adding the same word twice will leave the trie unchanged.

        Example:

        ```python
        >>> trie = Trie()
        >>> trie.add("Hello 友達")
        >>> trie.data
        {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}

        >>> trie.add("Hello")
        >>> trie.data
        {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
        ```
        """
        if not word:
            # Prevent empty string
            return
        ref = self.data
        for char in word:
            ref[char] = char in ref and ref[char] or {}
            ref = ref[char]
        ref[""] = 1

    def split(self, text: str) -> List[str]:
        """
        Will look for the words added to the trie within `text`. The output is the original string split along the
        boundaries of the words found.

        This trie will match the longest possible word first!

        Example:

        ```python
        >>> trie = Trie()
        >>> trie.split("[CLS] This is a extra_id_100")
        ["[CLS] This is a extra_id_100"]

        >>> trie.add("[CLS]")
        >>> trie.add("extra_id_1")
        >>> trie.add("extra_id_100")
        >>> trie.split("[CLS] This is a extra_id_100")
        ["[CLS]", " This is a ", "extra_id_100"]
        ```
        """
        # Indexes are counted left of the char's index.
        # In "hello", index 0 is left of h, index 1 is between h and e.
        # Index 5 is right of the "o".

        # States are going to capture every possible start (indexes as above)
        # as keys, and have as values a pointer to the position in the trie
        # where we're at. This is a partial match for now.
        # This enables us to keep track of multiple matches while we're
        # iterating over the string.
        # If the trie contains "blowing" and "lower" and we encounter the
        # string "blower", we need to split into ["b", "lower"].
        # This is where we need to keep track of multiple possible starts.
        states = OrderedDict()

        # This will contain every index where we need to cut.
        # We force a cut at offset 0 and len(text) (added later).
        offsets = [0]

        # This is used by the lookahead, which needs to skip over
        # some text where the full match exceeded the place in the initial
        # for loop.
        skip = 0
        # Main loop, giving this algorithm O(n) complexity
        for current, current_char in enumerate(text):
            if skip and current < skip:
                # Prevents the lookahead from matching twice,
                # like extra_id_100 and id_100
                continue

            # This will track every state that stops matching;
            # we need to stop tracking them.
            # If we look at "lowball", we're going to match "l" (add it to
            # states), "o", "w", then fail on "b"; we need to remove 0 from
            # the valid states.
            to_remove = set()
            # Whenever we find a match, we need to drop everything;
            # this is a greedy algorithm, it will match on the first found token.
            reset = False

            # In this case, we already have partial matches (but unfinished)
            for start, trie_pointer in states.items():
                if "" in trie_pointer:
                    # This is a final match: we need to reset and
                    # store the results in `offsets`.

                    # Lookahead to match the longest first.
                    # Important in case of extra_id_1 vs extra_id_100.
                    # Here we are also actively looking for other earlier
                    # partial matches:
                    # "[CLS]", "L", we need to match CLS even if L is special.
                    for lookstart, looktrie_pointer in states.items():
                        if lookstart > start:
                            # This partial match is later, we can stop looking
                            break
                        elif lookstart < start:
                            # This partial match is earlier, the trie pointer
                            # was already updated, so index is + 1
                            lookahead_index = current + 1
                            end = current + 1
                        else:
                            # Here lookstart == start and
                            # looktrie_pointer == trie_pointer
                            # It wasn't updated yet, so indices are current ones
                            lookahead_index = current
                            end = current
                        next_char = (
                            text[lookahead_index]
                            if lookahead_index < len(text)
                            else None
                        )
                        if "" in looktrie_pointer:
                            start = lookstart
                            end = lookahead_index
                            skip = lookahead_index

                        while next_char in looktrie_pointer:
                            looktrie_pointer = looktrie_pointer[next_char]
                            lookahead_index += 1
                            if "" in looktrie_pointer:
                                start = lookstart
                                end = lookahead_index
                                skip = lookahead_index

                            if lookahead_index == len(text):
                                # End of string
                                break
                            next_char = text[lookahead_index]
                    # End lookahead

                    # Storing and resetting
                    offsets.append(start)
                    offsets.append(end)
                    reset = True
                    break
                elif current_char in trie_pointer:
                    # The current character being looked at has a match within
                    # the trie; update the pointer (it will be stored back into
                    # states later).
                    trie_pointer = trie_pointer[current_char]

                    # Storing back the new pointer into the states.
                    # Partial matches got longer by one.
                    states[start] = trie_pointer
                else:
                    # The new character has no match in the trie; we need
                    # to stop keeping track of this partial match.
                    # We can't do it directly within the loop because of how
                    # Python iteration works.
                    to_remove.add(start)

            # Either clearing the full start (we found a real match)
            # or clearing only the partial matches that didn't work.
            if reset:
                states = {}
            else:
                for start in to_remove:
                    del states[start]

            # If this character is a starting character within the trie,
            # start keeping track of this partial match.
            if current >= skip and current_char in self.data:
                states[current] = self.data[current_char]

        # We have a cut at the end with states.
        for start, trie_pointer in states.items():
            if "" in trie_pointer:
                # This is a final match: we need to reset and
                # store the results in `offsets`.
                end = len(text)
                offsets.append(start)
                offsets.append(end)
                # The longest cut is always the one with the lower start, so
                # it is the first item and we need to break.
                break

        return self.cut_text(text, offsets)

    def cut_text(self, text, offsets):
        # We have all the offsets now, we just need to do the actual splitting.
        # We need to eventually add the first part of the string and the
        # eventual last part.
        offsets.append(len(text))
        tokens = []
        start = 0
        for end in offsets:
            if start > end:
                logging.error(
                    "There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it anyway."
                )
                continue
            elif start == end:
                # This might happen if there's a match at index 0;
                # we're also preventing zero-width cuts in case of two
                # consecutive matches.
                continue
            tokens.append(text[start:end])
            start = end

        return tokens

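The comments in `split` describe the multi-start bookkeeping with a "blower" example; a quick editorial check (assuming the `Trie` class above is in scope) confirms that behavior:

```python
trie = Trie()
trie.add("blowing")
trie.add("lower")
# "b" starts a partial match for "blowing" and "l" one for "lower";
# "blowing" fails at "e", so the start at index 0 is discarded and the
# completed "lower" match wins.
assert trie.split("blower") == ["b", "lower"]
```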
def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str):
    """
    Inserts one token into an ordered list if it does not already exist. Note: token_list must be sorted.
    """
    insertion_idx = bisect.bisect_left(token_list, new_token)
    # Checks if new_token is already in the ordered token_list
    if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token:
        # new_token is in token_list, don't add
        return
    else:
        token_list.insert(insertion_idx, new_token)


def _is_control(char):
    """Checks whether `char` is a control character."""
    # These are technically control characters, but we count them as
    # whitespace characters.
    if char == "\t" or char == "\n" or char == "\r":
        return False
    cat = unicodedata.category(char)
    if cat.startswith("C"):
        return True
    return False


def _is_nonnormalized_char(char):
    """Checks whether `char` is a non-normalized character."""
    cp = ord(char)
    if (
        (0xFF00 <= cp <= 0xFFEF)  # Halfwidth and Fullwidth Forms
        or (0xFE50 <= cp <= 0xFE6B)  # Small Form Variants
        or (0x3358 <= cp <= 0x33FF)  # CJK Compatibility
        or (0x249C <= cp <= 0x24E9)  # Enclosed Alphanumerics: Ⓛ ⒰
        or (0x3200 <= cp <= 0x32FF)  # Enclosed CJK Letters and Months
    ):
        return True

    return False


def _is_nonnormalized_numeric(char):
    """Checks whether `char` is a non-normalized numeric character."""
    cp = ord(char)
    if (
        (0x2460 <= cp <= 0x249B)  # Enclosed Alphanumerics
        or (0x24EA <= cp <= 0x24FF)  # Enclosed Alphanumerics
        or (0x2776 <= cp <= 0x2793)  # Dingbats
        or (0x2160 <= cp <= 0x217F)  # Number Forms
    ):
        return True

    return False


def normalize_chars(text):
    """
    Normalize the text for multilingual and Chinese models. Unicode ranges:
    https://www.ling.upenn.edu/courses/Spring_2003/ling538/UnicodeRanges.html
    """
    output = []
    for char in text:
        if _is_nonnormalized_char(char):
            for c in unicodedata.normalize("NFKC", char):
                output.append(c)
        elif _is_nonnormalized_numeric(char):
            output.append(" ")
            for c in str(int(unicodedata.numeric(char))):
                output.append(c)
            output.append(" ")
        elif ord(char) == 0xF979:  # https://www.zhihu.com/question/20697984
            output.append("凉")
        else:
            output.append(char)
    return "".join(output)

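Read together, the two predicates route fullwidth and enclosed characters to NFKC normalization, and enclosed numerics to their spaced-out numeric value. A small editorial example of the resulting behavior:

```python
# Fullwidth letters are NFKC-normalized; the circled digit ① (U+2460)
# is replaced by its numeric value wrapped in spaces.
print(normalize_chars("Ｈｅｌｌｏ①"))  # -> "Hello 1 "
```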
class ChatTemplateMixin:
    chat_template: Optional[ChatTemplate] = None

    def apply_chat_template(
        self,
        conversation: Union[List[List[str]], Dict[str, str], str],
        tokenize: bool = True,
        context_data: Dict[str, Any] = {},
        **tokenizer_kwargs,
    ):
        """Apply chat_template rules to a conversation, which should not be batched data.

        Args:
            conversation (List[List[str]] | str): the conversation messages between user and bot
            context_data (Dict[str, Any]): the context data for chat_template.json
            tokenize (bool, optional): whether to do tokenization. Defaults to True.

        Returns:
            str | dict[str, Union[numpy.ndarray, paddle.Tensor]]: the result of applying the template (and tokenizing, if requested)
        """
        if not self.chat_template:
            raise ValueError(
                "chat_template is not set, please set chat_template first."
            )
        elif isinstance(self.chat_template, Template):
            add_generation_prompt = tokenizer_kwargs.pop("add_generation_prompt", True)
            query = self._apply_chat_template(
                conversation, add_generation_prompt=add_generation_prompt
            )
        elif isinstance(self.chat_template, ChatTemplate):
            query = self._apply_chat_template_paddle(conversation, context_data)

        if not tokenize:
            return query

        # chat_template should not add special tokens
        tokenizer_kwargs["add_special_tokens"] = False
        return self(query, **tokenizer_kwargs)

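A hedged usage sketch (editorial; `tok` stands for any tokenizer that mixes in this class and has a chat template configured):

```python
# Render a single-turn conversation to a prompt string without tokenizing...
prompt = tok.apply_chat_template("Hello, who are you?", tokenize=False)
# ...or get model-ready ids directly; special tokens are left to the
# template, since add_special_tokens is forced to False above.
encoded = tok.apply_chat_template("Hello, who are you?", tokenize=True)
```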
    def _apply_chat_template_paddle(
        self,
        conversation: Union[List[List[str]], str],
        context_data: Dict[str, Any] = {},
    ):
        context_data = self.chat_template._init_context_data(context_data)

        if isinstance(conversation, str):
            conversation = [[conversation]]
        elif isinstance(conversation, list) and isinstance(conversation[0], str):
            raise ValueError(
                "apply_chat_template does not support applying batch conversations, "
                "so you should apply the conversation one by one."
            )

        query = self.chat_template(conversation, context_data=context_data)
        return query

    def _apply_chat_template(
        self,
        conversation: Union[List[List[str]], Dict[str, str], str],
        add_generation_prompt=True,
    ):
        if isinstance(conversation, str):
            conversations = [{"role": "user", "content": conversation}]
        elif isinstance(conversation, list):
            assert len(conversation) > 0, "empty conversation is not allowed"
            if isinstance(conversation[0], list):
                conversations = convert_to_dict_message(conversation)
            elif isinstance(conversation[0], dict):
                conversations = conversation
            else:
                raise ValueError(
                    "apply_chat_template does not support applying batch conversations, "
                    "so you should apply the conversation one by one."
                )
        query = self.chat_template.render(
            messages=conversations,
            **self.special_tokens_map,
            add_generation_prompt=add_generation_prompt,
        )
        return query

    def encode_chat_inputs(
        self,
        conversations: List[List[str]],
        context_data: Dict[str, Any] = {},
        **kwargs,
    ):
        """Encodes a conversation to pairs of token ids.
        Turn 0: bos + system + sep + user bot + eos
        Turn t: sep + bot + query bot + eos

        Args:
            conversations (List[List[str]]): the conversation data
            context_data (Dict[str, Any]): the context data of the conversation

        Returns:
            List[list[int], list[int]]: the pairs of input_ids and target_ids
        """
        if not self.chat_template:
            raise ValueError(
                "chat_template is not set, please set chat_template first."
            )
        elif isinstance(self.chat_template, Template):
            add_generation_prompt = kwargs.pop("add_generation_prompt", True)
            query = self._encode_chat_inputs(
                conversations, context_data, add_generation_prompt=add_generation_prompt
            )
        elif isinstance(self.chat_template, ChatTemplate):
            query = self._encode_chat_inputs_paddle(conversations, context_data)
        return query

    def _encode_chat_inputs_paddle(
        self, conversations: List[List[str]], context_data: Dict[str, Any] = {}
    ):
        context_data = self.chat_template._init_context_data(context_data)
        # encode system
        result = {}
        if self.chat_template.system:
            system = self.chat_template.render_system(context_data)
            result["system"] = self.encode(system, add_special_tokens=False)[
                "input_ids"
            ]

        # encode conversation
        conversation_ids = []
        for index, conversation in enumerate(conversations):
            # give more control to chat_template
            context_data["is_first"] = index == 0
            context_data["is_last"] = index == len(conversations) - 1

            user_input, bot_output = self.chat_template.render_conversation(
                conversation, index=index, context_data=context_data
            )
            user_ids = self.encode(user_input, add_special_tokens=False)["input_ids"]
            bot_ids = self.encode(bot_output, add_special_tokens=False)["input_ids"]
            conversation_ids.append([user_ids, bot_ids])

        result["conversations"] = conversation_ids
        return result

    def _encode_chat_inputs(
        self,
        conversations: List[List[str]],
        context_data: Dict[str, Any] = {},
        system: str = None,
        add_generation_prompt=True,
    ):
        result = {}

        # Some templates do not support a system message, so we need to check first.
        if system:
            try:
                self.chat_template.render(
                    messages={"role": "system", "content": system}
                )
            except Exception as e:
                raise ValueError("System is not supported in this tokenizer.", e)

        # convert list messages to role-dict messages
        conversation_dict = []
        origin_msg = []
        for round in conversations:
            round_role = [
                {"role": "user", "content": round[0]},
                {"role": "assistant", "content": round[1]},
            ]
            origin_msg.extend(round_role)
            conversation_dict.append(round_role)
        ans = []

        # Get the answer of each single round, then render the chat entirely
        # and split it by the single-round answers.
        # Attention: the answer should include the end token!
        for conv in conversation_dict:
            roundi = [system] + conv if system else conv
            roundi_str = self.chat_template.render(
                messages=roundi, add_generation_prompt=False, **self.special_tokens_map
            )
            roundi_no_ans = [system] + [conv[0]] if system else [conv[0]]
            roundi_no_ans_str = self.chat_template.render(
                messages=roundi_no_ans,
                add_generation_prompt=add_generation_prompt,
                **self.special_tokens_map,
            )
            ans_roundi = roundi_str[len(roundi_no_ans_str) :]
            ans.append(ans_roundi)

        non_learnable_parts = self._extract_non_learnable_parts(origin_msg, ans)
        assert len(non_learnable_parts) == len(
            ans
        ), f"Get non_learnable_parts len: {len(non_learnable_parts)}, but ans len: {len(ans)}."

        conversation_ids = []
        for i in range(len(non_learnable_parts)):
            conversation_ids.append(
                self.batch_encode(
                    [non_learnable_parts[i], ans[i]],
                    add_special_tokens=False,
                    padding=False,
                )["input_ids"]
            )

        result["conversations"] = conversation_ids
        return result

    def _extract_non_learnable_parts(
        self, origin_msg: List[Dict[str, str]], split_s: List[str]
    ):
        """Split the entire chat by the specified words and extract the non-learnable parts."""
        # Escape the special words in the original strings to an uncompiled form, like | -> \|
        regex_pattern = "|".join(map(re.escape, split_s))
        # split by the escaped specified words
        non_learnable_parts = re.split(
            r"(?:%s)" % regex_pattern,
            self.chat_template.render(
                messages=origin_msg,
                add_generation_prompt=False,
                **self.special_tokens_map,
            ),
        )
        if non_learnable_parts[-1] == "":
            non_learnable_parts.pop()
        return non_learnable_parts

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        from_hf_hub = kwargs.pop("from_hf_hub", False)
        from_aistudio = kwargs.pop("from_aistudio", False)
        subfolder = kwargs.pop("subfolder", "")
        if subfolder is None:
            subfolder = ""

        kwargs["subfolder"] = subfolder
        kwargs["cache_dir"] = cache_dir
        kwargs["from_hf_hub"] = from_hf_hub
        kwargs["from_aistudio"] = from_aistudio
        kwargs["return_tokenizer_file_dir"] = True
        tokenizer, tokenizer_config_file_dir = super().from_pretrained(
            pretrained_model_name_or_path, *args, **kwargs
        )

        # load chat-template
        chat_template_file = os.path.join(
            tokenizer_config_file_dir, CHAT_TEMPLATE_CONFIG_NAME
        )
        if not os.path.exists(chat_template_file):
            return tokenizer

        if tokenizer.chat_template is not None:
            logging.warning(
                "Chat-template already exists in config file, it will be overwritten by chat_template.json file."
            )
            logging.warning(
                "`chat_template.json` will be deprecated in the future! Please set it in `tokenizer_config.json`."
            )
        tokenizer.init_chat_template(chat_template_file)
        return tokenizer

    def init_chat_template(self, chat_template: Union[str, dict]):
        """Init chat_template from a file path or a template dict.

        Args:
            chat_template (str, dict): file path or template dict data
        """
        if isinstance(chat_template, str):
            if not os.path.exists(chat_template):
                try:
                    self.chat_template: Template = ChatTemplate._compile_jinja_template(
                        chat_template
                    )
                except TemplateSyntaxError:
                    # It is neither a jinja string nor a path string
                    raise TemplateSyntaxError(
                        "The chat-template in json is not a valid jinja string: {}".format(
                            chat_template
                        ),
                        lineno=0,  # fake lineno, useless required msg
                    )
            else:
                self.chat_template = ChatTemplate.from_file(chat_template)
        elif isinstance(chat_template, dict):
            self.chat_template = ChatTemplate.from_dict(chat_template)
        elif isinstance(chat_template, ChatTemplate):
            self.chat_template = chat_template
        else:
            raise ValueError("Received invalid chat_template data: ", chat_template)

    def save_resources(self, save_directory):
        super().save_resources(save_directory)

        if isinstance(
            self.chat_template, ChatTemplate
        ):  # Future: remove if ChatTemplate is deprecated
            chat_template_file = os.path.join(save_directory, CHAT_TEMPLATE_CONFIG_NAME)
            with open(chat_template_file, "w", encoding="utf-8") as f:
                json.dump(asdict(self.chat_template), f, ensure_ascii=False, indent=4)
            logging.info("Chat-template config file saved in " + chat_template_file)

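Since `init_chat_template` compiles any string that is not an existing path as a Jinja template, a template can be attached inline. A hedged sketch (the template text is illustrative; `tok` is any tokenizer using the mixin):

```python
tok.init_chat_template(
    "{% for m in messages %}<{{ m.role }}>: {{ m.content }}\n{% endfor %}"
    "{% if add_generation_prompt %}<assistant>: {% endif %}"
)
# `messages` and `add_generation_prompt` match the variables that
# `_apply_chat_template` passes to `render` above.
```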
class PretrainedTokenizer(
    ChatTemplateMixin, PretrainedTokenizerBase, metaclass=InitTrackerMeta
):
    """
    Base class for all tokenizers.

    Inherits from [`~tokenizer_utils_base.PretrainedTokenizerBase`].

    Handles all the shared methods for tokenization and special tokens, as well as methods for
    downloading/caching/loading pretrained tokenizers and for adding tokens to the vocabulary.

    This class also contains the added tokens in a unified way on top of all tokenizers, so we don't have to handle the
    specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).

    - **resource_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
      vocabulary file required by the model, and as associated values, the filename for saving the associated file
      (string).
    - **pretrained_resource_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
      high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
      low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
      associated pretrained vocabulary file.
    - **max_model_input_sizes** (`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the `short-cut-names`
      of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model,
      or `None` if the model has no maximum input size.
    - **pretrained_init_configuration** (`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
      `short-cut-names` of the pretrained models, and as associated values, a dictionary of specific arguments to
      pass to the `__init__` method of the tokenizer class for this pretrained model when loading the tokenizer
      with the [`~tokenizer_utils_base.PretrainedTokenizerBase.from_pretrained`] method.
    - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
    - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
      Should be `'right'` or `'left'`.
    - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
      applied. Should be `'right'` or `'left'`.

    Moreover, methods common to tokenizers for tokenization, token/id conversion
    and encoding as model inputs are also provided here.

    Besides, the metaclass `InitTrackerMeta` is used to create `PretrainedTokenizer`,
    by which subclasses can track arguments for initialization automatically
    and expose the special tokens used at initialization as attributes.
    """

    added_tokens_encoder: Dict[str, int] = {}
    added_tokens_decoder: Dict[int, str] = {}
    unique_no_split_tokens: List[str] = []
    tokens_trie = Trie()

    _decode_use_source_tokenizer = False

    def _pre_init(self, original_init, *args, **kwargs):
        """
        It would be hooked before `__init__` to add special tokens (arguments of
        `__init__` whose names end with `_token`) as attributes of the tokenizer
        instance.
        """
        init_dict = fn_args_to_dict(original_init, *((self,) + args), **kwargs)
        init_dict.pop("self", None)
        super(PretrainedTokenizer, self).__init__(**init_dict)

        self.added_tokens_decoder: Dict[int, AddedToken] = {}
        self.added_tokens_decoder.update(kwargs.pop("added_tokens_decoder", {}))
        self.added_tokens_encoder: Dict[str, int] = {
            k.content: v for v, k in self.added_tokens_decoder.items()
        }

        self.unique_no_split_tokens: List[str] = []
        self.tokens_trie = Trie()

        self._decode_use_source_tokenizer = False

    def _build_special_tokens_map_extended(self, **kwargs):
        for key, value in kwargs.items():
            if value is None:
                continue
            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
                if key == "additional_special_tokens":
                    assert isinstance(
                        value, (list, tuple)
                    ), f"Value {value} is not a list or tuple"
                    assert all(
                        isinstance(t, (str, AddedToken)) for t in value
                    ), "One of the tokens is not a string or an AddedToken"
                    setattr(self, key, value)
                elif isinstance(value, (str, AddedToken)):
                    setattr(self, key, value)
                else:
                    raise TypeError(
                        f"special token {key} has to be either str or AddedToken but got: {type(value)}"
                    )

    @property
    def vocab_size(self) -> int:
        """
        `int`: Size of the base vocabulary (without the added tokens).
        """
        raise NotImplementedError

    @property
    def is_fast(self) -> bool:
        return False

    def get_added_vocab(self) -> Dict[str, int]:
        """
        Returns the added tokens in the vocabulary as a dictionary of token to index.

        Returns:
            `Dict[str, int]`: The added tokens.
        """
        return self.added_tokens_encoder

    def __len__(self):
        """
        Size of the full vocabulary with the added tokens.
        """
        return self.vocab_size + len(self.added_tokens_encoder)

    def _add_tokens(
        self,
        new_tokens: Union[List[str], List[AddedToken]],
        special_tokens: bool = False,
    ) -> int:
        """
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
        it with indices starting from the length of the current vocabulary.

        Args:
            new_tokens (`List[str]` or `List[AddedToken]`):
                Token(s) to add to the vocabulary. A token is only added if it's not already in the vocabulary (tested by
                checking if the tokenizer assigns the index of the `unk_token` to it).
            special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the tokens should be added as special tokens.

        Returns:
            `int`: The number of tokens actually added to the vocabulary.

        Examples:

        ```python
        # Let's see how to increase the vocabulary of the Bert model and tokenizer
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        model = BertModel.from_pretrained("bert-base-uncased")

        num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
        print("We have added", num_added_toks, "tokens")
        ```"""
        new_tokens = [str(tok) for tok in new_tokens]

        tokens_to_add = []
        for token in new_tokens:
            if not isinstance(token, str):
                raise TypeError(f"Token {token} is not a string but a {type(token)}.")
            if (
                not special_tokens
                and hasattr(self, "do_lower_case")
                and self.do_lower_case
            ):
                token = token.lower()
            if (
                token != self.unk_token
                and self.convert_tokens_to_ids(token)
                == self.convert_tokens_to_ids(self.unk_token)
                and token not in tokens_to_add
                and token not in self.added_tokens_encoder.keys()
            ):
                tokens_to_add.append(token)
                if self.verbose:
                    logging.info(f"Adding {token} to the vocabulary")

        added_tok_encoder = dict(
            (tok, len(self) + i) for i, tok in enumerate(tokens_to_add)
        )
        added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
        self.added_tokens_encoder.update(added_tok_encoder)
        self.added_tokens_decoder.update(added_tok_decoder)

        # Make sure we don't split on any special tokens (even if they were
        # already in the vocab before, e.g. for Albert)
        if special_tokens:
            if len(new_tokens) == 1:
                _insert_one_token_to_ordered_list(
                    self.unique_no_split_tokens, new_tokens[0]
                )
            else:
                self.unique_no_split_tokens = sorted(
                    set(self.unique_no_split_tokens).union(set(new_tokens))
                )
        else:
            # Or on the newly added tokens
            if len(tokens_to_add) == 1:
                _insert_one_token_to_ordered_list(
                    self.unique_no_split_tokens, tokens_to_add[0]
                )
            else:
                self.unique_no_split_tokens = sorted(
                    set(self.unique_no_split_tokens).union(set(tokens_to_add))
                )
        self._create_trie(self.unique_no_split_tokens)

        return len(tokens_to_add)

    def _create_trie(self, unique_no_split_tokens):
        trie = Trie()
        for token in unique_no_split_tokens:
            if (
                hasattr(self, "do_lower_case")
                and self.do_lower_case
                and token not in self.all_special_tokens
            ):
                trie.add(token.lower())
            else:
                trie.add(token)
        self.tokens_trie = trie

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """
        Performs any necessary transformations before tokenization.

        This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
        `kwargs` at the end of the encoding process to be sure all the arguments have been used.

        Args:
            text (`str`):
                The text to prepare.
            is_split_into_words (`bool`, *optional*, defaults to `False`):
                Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
                tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
                which it will tokenize. This is useful for NER or token classification.
            kwargs:
                Keyword arguments to use for the tokenization.

        Returns:
            `Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
        """

        return (text, kwargs)

    def tokenize(self, text: TextInput, **kwargs) -> List[str]:
        """
        Converts a string into a sequence of tokens, using the tokenizer.

        Splits into words for word-based vocabularies or into sub-words for sub-word-based vocabularies
        (BPE/SentencePiece/WordPiece). Takes care of added tokens.

        Args:
            text (`str`):
                The sequence to be encoded.
            **kwargs (additional keyword arguments):
                Passed along to the model-specific `prepare_for_tokenization` preprocessing method.

        Returns:
            `List[str]`: The list of tokens.
        """

        split_special_tokens = kwargs.pop(
            "split_special_tokens", self.split_special_tokens
        )

        # Simple mapping string => AddedToken for special tokens with specific tokenization behaviors
        all_special_tokens_extended = dict(
            (str(t), t)
            for t in self.all_special_tokens_extended
            if isinstance(t, AddedToken)
        )

        text, kwargs = self.prepare_for_tokenization(text, **kwargs)

        # TODO: should this be in the base class?
        if hasattr(self, "do_lower_case") and self.do_lower_case:
            # convert non-special tokens to lowercase
            escaped_special_toks = [
                re.escape(s_tok)
                for s_tok in (self.unique_no_split_tokens + self.all_special_tokens)
            ]
            pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
            text = re.sub(
                pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text
            )

        if split_special_tokens:
            no_split_token = []
            tokens = [text]
        else:
            no_split_token = set(
                self.unique_no_split_tokens
            )  # don't split on any of the added tokens
            # "This is something<special_token_1> else"
            tokens = self.tokens_trie.split(text)

        # ["This is something", "<special_token_1>", " else"]
        for i, token in enumerate(tokens):
            if token in no_split_token:
                tok_extended = all_special_tokens_extended.get(token, None)
                left = tokens[i - 1] if i > 0 else None
                right = tokens[i + 1] if i < len(tokens) - 1 else None
                if isinstance(tok_extended, AddedToken):
                    if tok_extended.rstrip and right:
                        # A bit counter-intuitive, but we strip the left of the string
                        # since tok_extended.rstrip means the special token is eating all white spaces on its right
                        tokens[i + 1] = right.lstrip()
                    # Strip white spaces on the left
                    if tok_extended.lstrip and left:
                        tokens[i - 1] = left.rstrip()  # Opposite here
                else:
                    # We strip left and right by default
                    if right:
                        tokens[i + 1] = right.lstrip()
                    if left:
                        tokens[i - 1] = left.rstrip()
        # ["This is something", "<special_token_1>", "else"]
        tokenized_text = []
        for token in tokens:
            # Need to skip eventual empty (fully stripped) tokens
            if not token:
                continue
            if token in no_split_token:
                tokenized_text.append(token)
            else:
                tokenized_text.extend(self._tokenize(token))
        # ["This", " is", " something", "<special_token_1>", "else"]
        return tokenized_text

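The inline comments in `tokenize` trace how the trie built by `_create_trie` keeps added tokens intact. A hedged end-to-end sketch (editorial; `tokenizer` is an instance of any concrete subclass, and tokens are added via the public `add_tokens` wrapper from the base class):

```python
tokenizer.add_tokens(["<special_token_1>"], special_tokens=True)
# The added token is routed through tokens_trie.split() and survives
# as a single piece, while the surrounding text is sub-tokenized:
print(tokenizer.tokenize("This is something<special_token_1> else"))
# e.g. ["this", "is", "something", "<special_token_1>", "else"]
# for a lowercasing, word-level subclass
```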
    def _tokenize(self, text, **kwargs):
        """
        Converts a string into a sequence of tokens (strings), using the tokenizer. Splits into words for word-based
        vocabularies or into sub-words for sub-word-based vocabularies (BPE/SentencePiece/WordPiece).

        Does NOT take care of added tokens.
        """
        raise NotImplementedError

    def convert_tokens_to_ids(self, tokens):
        if tokens is None:
            return None

        if isinstance(tokens, str):
            return self._convert_token_to_id_with_added_voc(tokens)

        ids = []
        for token in tokens:
            ids.append(self._convert_token_to_id_with_added_voc(token))

        return ids

    def _convert_token_to_id_with_added_voc(self, token):
        if token is None:
            return None

        if token in self.added_tokens_encoder:
            return self.added_tokens_encoder[token]
        return self._convert_token_to_id(token)

    def _convert_token_to_id(self, token):
        return self.vocab.to_indices(token)

    def convert_tokens_to_string(self, tokens):
        """
        Converts a sequence of tokens (list of strings) to a single string by
        using ``' '.join(tokens)``.

        Args:
            tokens (list[str]): A sequence of tokens.

        Returns:
            str: Converted string.
        """
        return " ".join(tokens)

    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        if isinstance(ids, int):
            if ids in self.added_tokens_decoder:
                token = self.added_tokens_decoder[ids]
                token = token.content if isinstance(token, AddedToken) else token
                return token
            else:
                return self._convert_id_to_token(ids)
        tokens = []
        for index in ids:
            index = int(index)
            if skip_special_tokens and index in self.all_special_ids:
                continue
            if index in self.added_tokens_decoder:
                token = self.added_tokens_decoder[index]
                token = token.content if isinstance(token, AddedToken) else token
                tokens.append(token)
            else:
                tokens.append(self._convert_id_to_token(index))
        return tokens

    def _convert_id_to_token(self, index):
        return self.vocab.to_tokens(index)

    @staticmethod
    def load_vocabulary(
        filepath,
        unk_token=None,
        pad_token=None,
        bos_token=None,
        eos_token=None,
        **kwargs,
    ):
        """
        Instantiates an instance of `Vocab` from a file, reserving all tokens
        by using `Vocab.from_dict`. The file contains one token per line, and the
        line number is the index of the corresponding token.

        Args:
            filepath (str): path of the file used to construct the vocabulary.
            unk_token (str): special token for unknown token. If not needed, it can
                be `None`. Defaults to `None`.
            pad_token (str): special token for padding token. If not needed, it can
                be `None`. Defaults to `None`.
            bos_token (str): special token for bos token. If not needed, it can
                be `None`. Defaults to `None`.
            eos_token (str): special token for eos token. If not needed, it can
                be `None`. Defaults to `None`.
            **kwargs (dict): keyword arguments for `Vocab.from_dict`.

        Returns:
            Vocab: An instance of `Vocab`.
        """
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        vocab = Vocab.from_dict(
            token_to_idx,
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            **kwargs,
        )
        return vocab

    @staticmethod
    def save_vocabulary(filepath, vocab):
        """
        Saves all tokens to a vocabulary file. The file contains one token per line,
        and the line number is the index of the corresponding token.

        Args:
            filepath (str): File path to be saved to.
            vocab (Vocab|dict): The `Vocab` or `dict` instance to be saved.
        """
        if isinstance(vocab, Vocab):
            tokens = vocab.idx_to_token
        else:
            tokens = sorted(vocab.keys(), key=lambda token: vocab[token])
        with io.open(filepath, "w", encoding="utf-8") as f:
            for token in tokens:
                f.write(token + "\n")

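A hedged round-trip sketch of the two static helpers above (editorial; the file name is illustrative and `Vocab` is assumed importable as in this module):

```python
PretrainedTokenizer.save_vocabulary(
    "vocab.txt", {"[PAD]": 0, "[UNK]": 1, "hello": 2}
)
vocab = PretrainedTokenizer.load_vocabulary("vocab.txt", unk_token="[UNK]")
# one token per line, so line number == token index and
# "hello" maps back to index 2
```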
def get_special_tokens_mask(
|
1396
|
+
self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
|
1397
|
+
):
|
1398
|
+
"""
|
1399
|
+
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
|
1400
|
+
special tokens using the tokenizer ``encode`` methods.
|
1401
|
+
|
1402
|
+
Args:
|
1403
|
+
token_ids_0 (List[int]): List of ids of the first sequence.
|
1404
|
+
token_ids_1 (List[int], optional): List of ids of the second sequence.
|
1405
|
+
already_has_special_tokens (bool, optional): Whether or not the token list is already
|
1406
|
+
formatted with special tokens for the model. Defaults to None.
|
1407
|
+
|
1408
|
+
Returns:
|
1409
|
+
results (List[int]): The list of integers in the range [0, 1]:
|
1410
|
+
1 for a special token, 0 for a sequence token.
|
1411
|
+
"""
|
1412
|
+
if already_has_special_tokens:
|
1413
|
+
if token_ids_1 is not None:
|
1414
|
+
raise ValueError(
|
1415
|
+
"You should not supply a second sequence if the provided sequence of "
|
1416
|
+
"ids is already formatted with special tokens for the model."
|
1417
|
+
)
|
1418
|
+
|
1419
|
+
return super().get_special_tokens_mask(
|
1420
|
+
token_ids_0=token_ids_0,
|
1421
|
+
token_ids_1=token_ids_1,
|
1422
|
+
already_has_special_tokens=True,
|
1423
|
+
)
|
1424
|
+
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
|
1425
|
+
|
1426
|
+
def num_special_tokens_to_add(self, pair):
|
1427
|
+
"""
|
1428
|
+
Returns the number of added tokens when encoding a sequence with special tokens.
|
1429
|
+
|
1430
|
+
Args:
|
1431
|
+
pair (bool, optional):
|
1432
|
+
Whether the number of added tokens should be computed in the case of a sequence pair or a single
|
1433
|
+
sequence. Defaults to `False`.
|
1434
|
+
Returns:
|
1435
|
+
int: Number of special tokens added to sequences.
|
1436
|
+
"""
|
1437
|
+
token_ids_0 = []
|
1438
|
+
token_ids_1 = []
|
1439
|
+
return len(
|
1440
|
+
self.build_inputs_with_special_tokens(
|
1441
|
+
token_ids_0, token_ids_1 if pair else None
|
1442
|
+
)
|
1443
|
+
)
|
1444
|
+
|
1445
|
+
def _encode_plus(
|
1446
|
+
self,
|
1447
|
+
text: Union[TextInput, PreTokenizedInput, EncodedInput],
|
1448
|
+
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
|
1449
|
+
add_special_tokens: bool = True,
|
1450
|
+
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
|
1451
|
+
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
|
1452
|
+
max_length: Optional[int] = None,
|
1453
|
+
stride: int = 0,
|
1454
|
+
is_split_into_words: bool = False,
|
1455
|
+
pad_to_multiple_of: Optional[int] = None,
|
1456
|
+
padding_side: Optional[Literal["right", "left"]] = None,
|
1457
|
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
1458
|
+
return_position_ids: Optional[bool] = None,
|
1459
|
+
return_token_type_ids: Optional[bool] = None,
|
1460
|
+
return_attention_mask: Optional[bool] = None,
|
1461
|
+
return_overflowing_tokens: bool = False,
|
1462
|
+
return_special_tokens_mask: bool = False,
|
1463
|
+
return_offsets_mapping: bool = False,
|
1464
|
+
return_length: bool = False,
|
1465
|
+
verbose: bool = True,
|
1466
|
+
**kwargs,
|
1467
|
+
) -> BatchEncoding:
|
1468
|
+
def get_input_ids(text):
|
1469
|
+
if isinstance(text, str):
|
1470
|
+
tokens = self.tokenize(text, **kwargs)
|
1471
|
+
return self.convert_tokens_to_ids(tokens)
|
1472
|
+
elif (
|
1473
|
+
isinstance(text, (list, tuple))
|
1474
|
+
and len(text) > 0
|
1475
|
+
and isinstance(text[0], str)
|
1476
|
+
):
|
1477
|
+
if is_split_into_words:
|
1478
|
+
tokens = list(
|
1479
|
+
itertools.chain(
|
1480
|
+
*(
|
1481
|
+
self.tokenize(t, is_split_into_words=True, **kwargs)
|
1482
|
+
for t in text
|
1483
|
+
)
|
1484
|
+
)
|
1485
|
+
)
|
1486
|
+
return self.convert_tokens_to_ids(tokens)
|
1487
|
+
else:
|
1488
|
+
return self.convert_tokens_to_ids(text)
|
1489
|
+
elif (
|
1490
|
+
isinstance(text, (list, tuple))
|
1491
|
+
and len(text) > 0
|
1492
|
+
and isinstance(text[0], int)
|
1493
|
+
):
|
1494
|
+
return text
|
1495
|
+
else:
|
1496
|
+
if is_split_into_words:
|
1497
|
+
raise ValueError(
|
1498
|
+
f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_split_into_words=True`."
|
1499
|
+
)
|
1500
|
+
else:
|
1501
|
+
raise ValueError(
|
1502
|
+
f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
|
1503
|
+
)
|
1504
|
+
|
1505
|
+
first_ids = get_input_ids(text)
|
1506
|
+
second_ids = get_input_ids(text_pair) if text_pair is not None else None
|
1507
|
+
|
1508
|
+
if return_offsets_mapping:
|
1509
|
+
kwargs["text"] = text
|
1510
|
+
kwargs["text_pair"] = text_pair
|
1511
|
+
|
1512
|
+
return self.prepare_for_model(
|
1513
|
+
first_ids,
|
1514
|
+
pair_ids=second_ids,
|
1515
|
+
add_special_tokens=add_special_tokens,
|
1516
|
+
padding=padding_strategy.value,
|
1517
|
+
truncation=truncation_strategy.value,
|
1518
|
+
max_length=max_length,
|
1519
|
+
stride=stride,
|
1520
|
+
pad_to_multiple_of=pad_to_multiple_of,
|
1521
|
+
padding_side=padding_side,
|
1522
|
+
return_tensors=return_tensors,
|
1523
|
+
prepend_batch_axis=True,
|
1524
|
+
return_position_ids=return_position_ids,
|
1525
|
+
return_attention_mask=return_attention_mask,
|
1526
|
+
return_token_type_ids=return_token_type_ids,
|
1527
|
+
return_overflowing_tokens=return_overflowing_tokens,
|
1528
|
+
return_special_tokens_mask=return_special_tokens_mask,
|
1529
|
+
return_offsets_mapping=return_offsets_mapping,
|
1530
|
+
return_length=return_length,
|
1531
|
+
verbose=verbose,
|
1532
|
+
**kwargs,
|
1533
|
+
)
|
1534
|
+
|
1535
|
+
def _batch_encode_plus(
|
1536
|
+
self,
|
1537
|
+
batch_text_or_text_pairs: Union[
|
1538
|
+
List[TextInput],
|
1539
|
+
List[TextInputPair],
|
1540
|
+
List[PreTokenizedInput],
|
1541
|
+
List[PreTokenizedInputPair],
|
1542
|
+
List[EncodedInput],
|
1543
|
+
List[EncodedInputPair],
|
1544
|
+
],
|
1545
|
+
add_special_tokens: bool = True,
|
1546
|
+
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
|
1547
|
+
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
|
1548
|
+
max_length: Optional[int] = None,
|
1549
|
+
stride: int = 0,
|
1550
|
+
is_split_into_words: bool = False,
|
1551
|
+
pad_to_multiple_of: Optional[int] = None,
|
1552
|
+
padding_side: Optional[Literal["right", "left"]] = None,
|
1553
|
+
return_position_ids: Optional[bool] = None,
|
1554
|
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
1555
|
+
return_token_type_ids: Optional[bool] = None,
|
1556
|
+
return_attention_mask: Optional[bool] = None,
|
1557
|
+
return_overflowing_tokens: bool = False,
|
1558
|
+
return_special_tokens_mask: bool = False,
|
1559
|
+
return_dict: bool = True,
|
1560
|
+
return_offsets_mapping: bool = False,
|
1561
|
+
return_length: bool = False,
|
1562
|
+
verbose: bool = True,
|
1563
|
+
**kwargs,
|
1564
|
+
) -> BatchEncoding:
|
1565
|
+
def get_input_ids(text):
|
1566
|
+
if isinstance(text, str):
|
1567
|
+
tokens = self.tokenize(text, **kwargs)
|
1568
|
+
return self.convert_tokens_to_ids(tokens)
|
1569
|
+
elif (
|
1570
|
+
isinstance(text, (list, tuple))
|
1571
|
+
and len(text) > 0
|
1572
|
+
and isinstance(text[0], str)
|
1573
|
+
):
|
1574
|
+
if is_split_into_words:
|
1575
|
+
tokens = list(
|
1576
|
+
itertools.chain(
|
1577
|
+
*(
|
1578
|
+
self.tokenize(t, is_split_into_words=True, **kwargs)
|
1579
|
+
for t in text
|
1580
|
+
)
|
1581
|
+
)
|
1582
|
+
)
|
1583
|
+
return self.convert_tokens_to_ids(tokens)
|
1584
|
+
else:
|
1585
|
+
return self.convert_tokens_to_ids(text)
|
1586
|
+
elif (
|
1587
|
+
isinstance(text, (list, tuple))
|
1588
|
+
and len(text) > 0
|
1589
|
+
and isinstance(text[0], int)
|
1590
|
+
):
|
1591
|
+
return text
|
1592
|
+
else:
|
1593
|
+
raise ValueError(
|
1594
|
+
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
|
1595
|
+
)
|
1596
|
+
|
1597
|
+
input_ids = []
|
1598
|
+
for ids_or_pair_ids in batch_text_or_text_pairs:
|
1599
|
+
if not isinstance(ids_or_pair_ids, (list, tuple)):
|
1600
|
+
ids, pair_ids = ids_or_pair_ids, None
|
1601
|
+
elif is_split_into_words and not isinstance(
|
1602
|
+
ids_or_pair_ids[0], (list, tuple)
|
1603
|
+
):
|
1604
|
+
ids, pair_ids = ids_or_pair_ids, None
|
1605
|
+
else:
|
1606
|
+
ids, pair_ids = ids_or_pair_ids
|
1607
|
+
|
1608
|
+
first_ids = get_input_ids(ids)
|
1609
|
+
second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
|
1610
|
+
input_ids.append((first_ids, second_ids))
|
1611
|
+
|
1612
|
+
if stride > 0 and second_ids is not None:
|
1613
|
+
kwargs["batch_text_or_text_pairs"] = batch_text_or_text_pairs
|
1614
|
+
else:
|
1615
|
+
if return_offsets_mapping:
|
1616
|
+
has_pair = False
|
1617
|
+
if len(batch_text_or_text_pairs) > 0:
|
1618
|
+
if isinstance(batch_text_or_text_pairs[0], (list, tuple)):
|
1619
|
+
has_pair = True
|
1620
|
+
kwargs["texts"] = None
|
1621
|
+
kwargs["text_pairs"] = None
|
1622
|
+
if has_pair:
|
1623
|
+
kwargs["texts"] = [text[0] for text in batch_text_or_text_pairs]
|
1624
|
+
kwargs["text_pairs"] = [
|
1625
|
+
text[1] for text in batch_text_or_text_pairs
|
1626
|
+
]
|
1627
|
+
else:
|
1628
|
+
kwargs["texts"] = [text for text in batch_text_or_text_pairs]
|
1629
|
+
|
1630
|
+
batch_outputs = self._batch_prepare_for_model(
|
1631
|
+
input_ids,
|
1632
|
+
add_special_tokens=add_special_tokens,
|
1633
|
+
padding_strategy=padding_strategy,
|
1634
|
+
truncation_strategy=truncation_strategy,
|
1635
|
+
max_length=max_length,
|
1636
|
+
stride=stride,
|
1637
|
+
pad_to_multiple_of=pad_to_multiple_of,
|
1638
|
+
padding_side=padding_side,
|
1639
|
+
return_position_ids=return_position_ids,
|
1640
|
+
return_attention_mask=return_attention_mask,
|
1641
|
+
return_token_type_ids=return_token_type_ids,
|
1642
|
+
return_overflowing_tokens=return_overflowing_tokens,
|
1643
|
+
return_special_tokens_mask=return_special_tokens_mask,
|
1644
|
+
return_dict=return_dict,
|
1645
|
+
return_offsets_mapping=return_offsets_mapping,
|
1646
|
+
return_length=return_length,
|
1647
|
+
return_tensors=return_tensors,
|
1648
|
+
verbose=verbose,
|
1649
|
+
**kwargs,
|
1650
|
+
)
|
1651
|
+
|
1652
|
+
return batch_outputs
|
1653
|
+
|
1654
|
+
def _batch_prepare_for_model(
|
1655
|
+
self,
|
1656
|
+
batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
|
1657
|
+
add_special_tokens: bool = True,
|
1658
|
+
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
|
1659
|
+
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
|
1660
|
+
max_length: Optional[int] = None,
|
1661
|
+
stride: int = 0,
|
1662
|
+
pad_to_multiple_of: Optional[int] = None,
|
1663
|
+
padding_side: Optional[Literal["right", "left"]] = None,
|
1664
|
+
return_position_ids: Optional[bool] = None,
|
1665
|
+
return_tensors: Optional[str] = None,
|
1666
|
+
return_token_type_ids: Optional[bool] = None,
|
1667
|
+
return_attention_mask: Optional[bool] = None,
|
1668
|
+
return_overflowing_tokens: bool = False,
|
1669
|
+
return_special_tokens_mask: bool = False,
|
1670
|
+
return_dict: bool = True,
|
1671
|
+
return_offsets_mapping: bool = False,
|
1672
|
+
return_length: bool = False,
|
1673
|
+
verbose: bool = True,
|
1674
|
+
**kwargs,
|
1675
|
+
) -> BatchEncoding:
|
1676
|
+
"""
|
1677
|
+
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
|
1678
|
+
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
|
1679
|
+
manages a moving window (with user defined stride) for overflowing tokens
|
1680
|
+
|
1681
|
+
Args:
|
1682
|
+
batch_ids_pairs: list of tokenized input ids or input ids pairs
|
1683
|
+
"""
|
1684
|
+
if return_token_type_ids and not add_special_tokens:
|
1685
|
+
raise ValueError(
|
1686
|
+
"Asking to return token_type_ids while setting add_special_tokens to False "
|
1687
|
+
"results in an undefined behavior. Please set add_special_tokens to True or "
|
1688
|
+
"set return_token_type_ids to None."
|
1689
|
+
)
|
1690
|
+
|
1691
|
+
batch_outputs = {}
|
1692
|
+
batch_outputs_list = []
|
1693
|
+
for example_id, (first_ids, second_ids) in enumerate(batch_ids_pairs):
|
1694
|
+
if stride > 0 and second_ids is not None:
|
1695
|
+
if return_token_type_ids is None:
|
1696
|
+
return_token_type_ids = "token_type_ids" in self.model_input_names
|
1697
|
+
if return_attention_mask is None:
|
1698
|
+
return_attention_mask = "attention_mask" in self.model_input_names
|
1699
|
+
|
1700
|
+
max_len_for_pair = (
|
1701
|
+
max_length
|
1702
|
+
- len(first_ids)
|
1703
|
+
- (
|
1704
|
+
self.num_special_tokens_to_add(pair=True)
|
1705
|
+
if add_special_tokens
|
1706
|
+
else 0
|
1707
|
+
)
|
1708
|
+
)
|
1709
|
+
|
+                text, text_pair = kwargs["batch_text_or_text_pairs"][example_id]
+                token_offset_mapping = self.get_offset_mapping(text)
+                token_pair_offset_mapping = self.get_offset_mapping(text_pair)
+
+                offset = 0
+                while offset < len(second_ids):
+                    encoded_inputs = {}
+                    length = len(second_ids) - offset
+                    if length > max_len_for_pair:
+                        length = max_len_for_pair
+
+                    ids = first_ids
+                    pair_ids = second_ids[offset : offset + length]
+                    pair = bool(pair_ids is not None)
+                    mapping = token_offset_mapping
+                    pair_mapping = token_pair_offset_mapping[offset : offset + length]
+                    if add_special_tokens:
+                        offset_mapping = self.build_offset_mapping_with_special_tokens(
+                            mapping, pair_mapping
+                        )
+                        sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
+                        token_type_ids = self.create_token_type_ids_from_sequences(
+                            ids, pair_ids
+                        )
+                    else:
+                        offset_mapping = mapping + pair_mapping
+                        sequence = ids + pair_ids if pair else ids
+                        token_type_ids = [0] * len(ids) + (
+                            [0] * len(pair_ids) if pair else []
+                        )
+                    encoded_inputs["offset_mapping"] = offset_mapping
+                    # Build output dictionary
+                    encoded_inputs["input_ids"] = sequence
+                    if return_token_type_ids:
+                        encoded_inputs["token_type_ids"] = token_type_ids
+                    if return_special_tokens_mask:
+                        if add_special_tokens:
+                            encoded_inputs["special_tokens_mask"] = (
+                                self.get_special_tokens_mask(ids, pair_ids)
+                            )
+                        else:
+                            encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
+
+                    # Check lengths
+                    self._eventual_warn_about_too_long_sequence(
+                        encoded_inputs["input_ids"], max_length, verbose
+                    )
+                    if return_position_ids:
+                        encoded_inputs["position_ids"] = list(
+                            range(len(encoded_inputs["input_ids"]))
+                        )
+
+                    if return_length:
+                        encoded_inputs["length"] = len(encoded_inputs["input_ids"])
+                        encoded_inputs["seq_len"] = encoded_inputs["length"]
+
+                    encoded_inputs["overflow_to_sample"] = example_id
+
+                    for key, value in encoded_inputs.items():
+                        if key not in batch_outputs:
+                            batch_outputs[key] = []
+                        batch_outputs[key].append(value)
+
+                    if offset + length == len(second_ids):
+                        break
+                    offset += min(length, stride)
+            else:
+                if return_offsets_mapping:
+                    kwargs["text"] = kwargs["texts"][example_id]
+                    kwargs["text_pair"] = None
+                    if kwargs["text_pairs"] is not None:
+                        kwargs["text_pair"] = kwargs["text_pairs"][example_id]
+
+                encoded_inputs = self.prepare_for_model(
+                    first_ids,
+                    second_ids,
+                    add_special_tokens=add_special_tokens,
+                    padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
+                    truncation=truncation_strategy.value,
+                    max_length=max_length,
+                    stride=stride,
+                    pad_to_multiple_of=None,  # we pad in batch afterward
+                    padding_side=padding_side,  # we pad in batch afterward
+                    return_position_ids=return_position_ids,  # we pad in batch afterward
+                    return_attention_mask=False,  # we pad in batch afterward
+                    return_token_type_ids=return_token_type_ids,
+                    return_overflowing_tokens=return_overflowing_tokens,
+                    return_special_tokens_mask=return_special_tokens_mask,
+                    return_offsets_mapping=return_offsets_mapping,
+                    return_length=return_length,
+                    return_tensors=None,  # We convert the whole batch to tensors at the end
+                    prepend_batch_axis=False,
+                    verbose=verbose,
+                    **kwargs,
+                )
+                for key, value in encoded_inputs.items():
+                    if key not in batch_outputs:
+                        batch_outputs[key] = []
+                    batch_outputs[key].append(value)
+
+        batch_outputs = self.pad(
+            batch_outputs,
+            padding=padding_strategy.value,
+            max_length=max_length,
+            pad_to_multiple_of=pad_to_multiple_of,
+            padding_side=padding_side,
+            return_attention_mask=return_attention_mask,
+        )
+        if return_dict:
+            batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
+            return batch_outputs
+        else:
+            for k, v in batch_outputs.items():
+                for i in range(len(v)):
+                    if i >= len(batch_outputs_list):
+                        batch_outputs_list.append({k: v[i]})
+                    else:
+                        batch_outputs_list[i][k] = v[i]
+            return batch_outputs_list
+
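The overflow branch above windows the second sequence into chunks of at most `max_len_for_pair` ids, advances the window by `min(length, stride)` so consecutive chunks overlap, and tags every chunk with `overflow_to_sample` so downstream code can trace each window back to its source example. A minimal standalone sketch of just that windowing arithmetic (plain Python, independent of any tokenizer class):

```python
# Sketch of the sliding-window loop used for overflowing pairs: chunk the
# "second" sequence into windows of at most max_len_for_pair ids, stepping
# by min(length, stride) so consecutive windows overlap.
def window_offsets(num_ids: int, max_len_for_pair: int, stride: int):
    windows = []
    offset = 0
    while offset < num_ids:
        length = min(num_ids - offset, max_len_for_pair)
        windows.append((offset, offset + length))
        if offset + length == num_ids:
            break
        offset += min(length, stride)
    return windows

# 10 pair ids, windows of at most 4, stride 2 -> overlapping spans
print(window_offsets(10, 4, 2))  # [(0, 4), (2, 6), (4, 8), (6, 10)]
```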
+    def _get_bert_like_offset_mapping(self, text: str):
+        """
+        Returns, for each token, the start and end character indices of that token in the input text.
+        Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/tokenizers.py#L372
+        Args:
+            text (str):
+                Input text.
+        Returns:
+            list: The offset map of the input text.
+
+        """
+        if text is None:
+            return None
+        split_tokens = self.tokenize(text)
+
+        normalized_text, char_mapping = "", []
+
+        for i, ch in enumerate(text):
+            if hasattr(self, "do_lower_case") and self.do_lower_case:
+                ch = ch.lower()
+                if self.basic_tokenizer.strip_accents is not False:
+                    ch = unicodedata.normalize("NFD", ch)
+                    ch = "".join([c for c in ch if unicodedata.category(c) != "Mn"])
+            elif self.basic_tokenizer.strip_accents:
+                ch = unicodedata.normalize("NFD", ch)
+                ch = "".join([c for c in ch if unicodedata.category(c) != "Mn"])
+
+            ch = "".join(
+                [
+                    c
+                    for c in ch
+                    if not (ord(c) == 0 or ord(c) == 0xFFFD or _is_control(c))
+                ]
+            )
+            normalized_text += ch
+
+            char_mapping.extend([i] * len(ch))
+        text, token_mapping, offset = normalized_text, [], 0
+
+        char_mapping_indexes = []
+        for index, token in enumerate(split_tokens):
+            if token[:2] == "##":
+                token = token[2:]
+            if token in self.all_special_tokens:
+                token = (
+                    token.lower()
+                    if hasattr(self, "do_lower_case") and self.do_lower_case
+                    else token
+                )
+            # The Greek letter sigma has two lowercase forms, σ and ς.
+            # The final form (ς) is used at the end of a word; σ is used otherwise.
+            # https://latin.stackexchange.com/questions/6168/how-and-when-did-we-get-two-forms-of-sigma
+            if "σ" in token or "ς" in token:
+                start = (
+                    text[offset:].replace("ς", "σ").index(token.replace("ς", "σ"))
+                    + offset
+                )
+            else:
+
+                # try to fix: https://github.com/PaddlePaddle/PaddleNLP/issues/3985
+                if token not in text[offset:]:
+                    # check whether there are consecutive UNK tokens, e.g. ['好', '[UNK]', '[UNK]', 'good']
+                    if (
+                        index < len(split_tokens) - 1
+                        and split_tokens[index + 1] in self.all_special_tokens
+                    ):
+                        start = offset
+                        token = " "  # only contains one char
+                    else:
+                        start = -1
+                else:
+                    start = text[offset:].index(token) + offset
+
+            end = start + len(token)
+            char_mapping_indexes.append([start, end])
+
+            if start != -1:
+                offset = end
+
+        token_mapping = []
+        for index, (start, end) in enumerate(char_mapping_indexes):
+            if start == -1:
+                # init start
+                if index == 0:
+                    start = 0
+                else:
+                    start = char_mapping_indexes[index - 1][1]
+
+                # init end
+                if index == len(char_mapping_indexes) - 1:
+                    end = len(char_mapping)
+                else:
+                    # next start
+                    end = char_mapping_indexes[index + 1][0]
+
+            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
+
+        return token_mapping
+
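`_get_bert_like_offset_mapping` normalizes the text the same way the tokenizer does, records which original character each normalized character came from (`char_mapping`), locates each token in the normalized text, and projects those spans back through `char_mapping`. A minimal, self-contained sketch of the same idea for a toy lowercasing WordPiece-style tokenizer (the names here are illustrative, not the paddlex API, and the special-token and sigma handling are omitted):

```python
# Illustrative sketch: locate tokens in the normalized text, then map the
# spans back to positions in the original string via char_mapping.
def toy_offset_mapping(text, tokens):
    normalized, char_mapping = "", []
    for i, ch in enumerate(text):
        ch = ch.lower()                  # toy normalization: lowercasing only
        normalized += ch
        char_mapping.extend([i] * len(ch))

    mapping, offset = [], 0
    for token in tokens:
        token = token[2:] if token.startswith("##") else token
        start = normalized.index(token, offset)
        end = start + len(token)
        mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
        offset = end
    return mapping

text = "Hello WordPiece"
tokens = ["hello", "word", "##piece"]
print(toy_offset_mapping(text, tokens))  # [(0, 5), (6, 10), (10, 15)]
```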
+    def get_offset_mapping(self, text: str, split_tokens: Optional[List[str]] = None):
+        """
+        Returns, for each token, the start and end character indices of that token in the input text.
+        Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/tokenizers.py#L372
+        Args:
+            text (str):
+                Input text.
+            split_tokens (Optional[List[str]]):
+                Tokens that have already been split; passing them in avoids tokenizing `text` again.
+
+        Returns:
+            list: The offset map of the input text.
+
+        """
+        if text is None:
+            return None
+
+        # bert-like tokenizers use the old-school code block
+        if hasattr(self, "basic_tokenizer") or hasattr(self, "wordpiece_tokenizer"):
+            return self._get_bert_like_offset_mapping(text)
+
+        if not split_tokens:
+            split_tokens = self.tokenize(text)
+
+        normalized_text, char_mapping = "", []
+
+        for i, ch in enumerate(text):
+            normalized_text += normalize_chars(ch)
+            char_mapping.extend([i] * len(ch))
+
+        text, token_mapping, offset = normalized_text, [], 0
+        do_lower_case = getattr(self, "do_lower_case", False)
+
+        # lower-case the text so that it stays aligned with the lower-cased tokens
+        if do_lower_case:
+            text = text.lower()
+
+        char_mapping_indexes = []
+        for token in split_tokens:
+
+            # convert the token back into a piece of the original string
+            token: str = self.convert_tokens_to_string(token).strip()
+
+            if token in self.all_special_tokens:
+                if do_lower_case:
+                    token = token.lower()
+
+            # The Greek letter sigma has two lowercase forms, σ and ς.
+            # The final form (ς) is used at the end of a word; σ is used otherwise.
+            # https://latin.stackexchange.com/questions/6168/how-and-when-did-we-get-two-forms-of-sigma
+            if "σ" in token or "ς" in token:
+                start = (
+                    text[offset:].replace("ς", "σ").index(token.replace("ς", "σ"))
+                    + offset
+                )
+            else:
+
+                # try to fix: https://github.com/PaddlePaddle/PaddleNLP/issues/3985
+                if token not in text[offset:]:
+                    start = -1
+                else:
+                    start = text[offset:].index(token) + offset
+
+            end = start + len(token)
+            char_mapping_indexes.append([start, end])
+
+            if start != -1:
+                offset = end
+
+        token_mapping = []
+        for index, (start, end) in enumerate(char_mapping_indexes):
+            if start == -1:
+                # init start
+                if index == 0:
+                    start = 0
+                else:
+                    start = char_mapping_indexes[index - 1][1]
+
+                # init end
+                if index == len(char_mapping_indexes) - 1:
+                    end = len(char_mapping)
+                else:
+                    # next start
+                    end = char_mapping_indexes[index + 1][0]
+
+            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
+
+        return token_mapping
+
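The contract of `get_offset_mapping` is that each returned `(start, end)` pair indexes into the original string, so slicing recovers every token's surface form untouched by normalization. A self-contained illustration of that contract, with a trivial whitespace "tokenizer" standing in for a real sub-word one:

```python
import re

# Toy stand-in: whatever produces the (start, end) spans, slicing the
# original text with them must recover each token's surface form.
def toy_get_offset_mapping(text):
    return [(m.start(), m.end()) for m in re.finditer(r"\S+", text)]

text = "PaddleX 3.0 ships new tokenizers."
for start, end in toy_get_offset_mapping(text):
    print(f"[{start:>2}, {end:>2}) -> {text[start:end]!r}")
```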
+    def _decode(
+        self,
+        token_ids: List[int],
+        skip_special_tokens: bool = False,
+        clean_up_tokenization_spaces: bool = True,
+        spaces_between_special_tokens: bool = True,
+        **kwargs,
+    ) -> str:
+        if isinstance(token_ids, np.ndarray):
+            token_ids = token_ids.tolist()
+        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
+        filtered_tokens = self.convert_ids_to_tokens(
+            token_ids, skip_special_tokens=skip_special_tokens
+        )
+
+        # To avoid mixing byte-level and unicode for byte-level BPE
+        # we need to build the string separately for added tokens and byte-level tokens
+        # cf. https://github.com/huggingface/transformers/issues/1133
+        sub_texts = []
+        current_sub_text = []
+        for token in filtered_tokens:
+            # tokens are strings here, so compare against special tokens, not ids
+            if skip_special_tokens and token in self.all_special_tokens:
+                continue
+            if token in self.added_tokens_encoder:
+                if current_sub_text:
+                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
+                    current_sub_text = []
+                sub_texts.append(token)
+            else:
+                current_sub_text.append(token)
+        if current_sub_text:
+            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
+
+        if spaces_between_special_tokens:
+            text = " ".join(sub_texts)
+        else:
+            text = "".join(sub_texts)
+
+        if clean_up_tokenization_spaces:
+            clean_text = self.clean_up_tokenization(text)
+            return clean_text
+        else:
+            return text
+
+
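The grouping logic in `_decode` exists because added tokens must not be run through `convert_tokens_to_string` together with regular sub-word tokens. A self-contained sketch of the same grouping, with a toy join standing in for `convert_tokens_to_string` (names are illustrative):

```python
# Toy model of the grouping in _decode: runs of ordinary tokens are converted
# as one chunk; added tokens break the run and pass through verbatim.
def toy_decode(tokens, added_tokens, convert=lambda ts: "".join(ts).replace("##", "")):
    sub_texts, current = [], []
    for token in tokens:
        if token in added_tokens:
            if current:
                sub_texts.append(convert(current))
                current = []
            sub_texts.append(token)
        else:
            current.append(token)
    if current:
        sub_texts.append(convert(current))
    return " ".join(sub_texts)  # spaces_between_special_tokens=True behavior

print(toy_decode(["hel", "##lo", "<mask>", "world"], {"<mask>"}))
# hello <mask> world
```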
+def _is_control(char):
+    """Checks whether `char` is a control character."""
+    # These are technically control characters but we count them as whitespace
+    # characters.
+    if char == "\t" or char == "\n" or char == "\r":
+        return False
+    cat = unicodedata.category(char)
+    if cat.startswith("C"):
+        return True
+    return False
+
+
+def _is_punctuation(char):
+    """Checks whether `char` is a punctuation character."""
+    cp = ord(char)
+    # We treat all non-letter/number ASCII as punctuation.
+    # Characters such as "^", "$", and "`" are not in the Unicode
+    # Punctuation class but we treat them as punctuation anyways, for
+    # consistency.
+    if (
+        (cp >= 33 and cp <= 47)
+        or (cp >= 58 and cp <= 64)
+        or (cp >= 91 and cp <= 96)
+        or (cp >= 123 and cp <= 126)
+    ):
+        return True
+    cat = unicodedata.category(char)
+    if cat.startswith("P"):
+        return True
+    return False
+
+
+def _is_symbol(char):
+    """Checks whether `char` is a symbol character."""
+    cp = ord(char)
+    if unicodedata.category(char).startswith("S") or (
+        cp in [0x00AD, 0x00B2, 0x00BA, 0x3007, 0x00B5, 0x00D8, 0x014B, 0x01B1]
+    ):
+        return True
+    return False
+
+
+def _is_whitespace(char):
+    """
+    Checks whether `char` is a whitespace character.
+    """
+    # \t, \n, and \r are technically control characters but we treat them
+    # as whitespace since they are generally considered as such.
+    if char == " " or char == "\t" or char == "\n" or char == "\r":
+        return True
+    cat = unicodedata.category(char)
+    if cat == "Zs":
+        return True
+    return False
+
+
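These predicates partition characters by Unicode general category (`C*` control, `P*` punctuation, `S*` symbol, `Zs` space separator), plus a few hand-picked exceptions. A quick self-contained check of how a few characters classify under the same category rules (the extra codepoint lists from `_is_punctuation`/`_is_symbol` are omitted here):

```python
import unicodedata

WS = {" ", "\t", "\n", "\r"}
for ch in ["\t", " ", "a", ",", "$", "\u00ad"]:
    cat = unicodedata.category(ch)
    is_control = cat.startswith("C") and ch not in {"\t", "\n", "\r"}
    is_punct = cat.startswith("P")
    is_symbol = cat.startswith("S")
    is_space = cat == "Zs" or ch in WS
    print(repr(ch), cat, is_control, is_punct, is_symbol, is_space)
```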
+def convert_to_unicode(text):
+    """
+    Converts `text` to Unicode (if it's not already), assuming utf-8 input.
+    Args:
+        text (str|bytes): Text to be converted to unicode.
+    Returns:
+        str: converted text.
+    """
+    if isinstance(text, str):
+        return text
+    elif isinstance(text, bytes):
+        return text.decode("utf-8", "ignore")
+    else:
+        raise ValueError("Unsupported string type: %s" % (type(text)))
+
+
+def whitespace_tokenize(text):
+    """
+    Runs basic whitespace cleaning and splitting on a piece of text.
+    Args:
+        text (str): Text to be tokenized.
+    Returns:
+        list(str): Token list.
+    """
+    text = text.strip()
+    if not text:
+        return []
+    tokens = text.split()
+    return tokens
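Both helpers are deliberately forgiving: `convert_to_unicode` passes `str` through unchanged and decodes `bytes` with errors ignored, and `whitespace_tokenize` collapses any run of whitespace after stripping. A brief usage sketch, assuming the two functions above are in scope:

```python
# With convert_to_unicode and whitespace_tokenize in scope:
raw = b"Paddle\xe2\x80\x8bX tokenizer  utils\n"   # contains a zero-width space
text = convert_to_unicode(raw)                     # bytes -> str (utf-8, errors ignored)
print(whitespace_tokenize(text))                   # ['Paddle\u200bX', 'tokenizer', 'utils']
print(whitespace_tokenize("   "))                  # [] (empty after strip)
```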