openocr-python 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openocr/__init__.py +11 -0
- openocr/configs/det/dbnet/repvit_db.yml +173 -0
- openocr/configs/rec/abinet/resnet45_trans_abinet_lang.yml +94 -0
- openocr/configs/rec/abinet/resnet45_trans_abinet_wo_lang.yml +93 -0
- openocr/configs/rec/abinet/svtrv2_abinet_lang.yml +130 -0
- openocr/configs/rec/abinet/svtrv2_abinet_wo_lang.yml +128 -0
- openocr/configs/rec/aster/resnet31_lstm_aster_tps_on.yml +93 -0
- openocr/configs/rec/aster/svtrv2_aster.yml +127 -0
- openocr/configs/rec/aster/svtrv2_aster_tps_on.yml +102 -0
- openocr/configs/rec/autostr/autostr_lstm_aster_tps_on.yml +95 -0
- openocr/configs/rec/busnet/svtrv2_busnet.yml +135 -0
- openocr/configs/rec/busnet/svtrv2_busnet_pretraining.yml +134 -0
- openocr/configs/rec/busnet/vit_busnet.yml +104 -0
- openocr/configs/rec/busnet/vit_busnet_pretraining.yml +104 -0
- openocr/configs/rec/cam/convnextv2_cam_tps_on.yml +118 -0
- openocr/configs/rec/cam/convnextv2_tiny_cam_tps_on.yml +118 -0
- openocr/configs/rec/cam/svtrv2_cam_tps_on.yml +123 -0
- openocr/configs/rec/cdistnet/resnet45_trans_cdistnet.yml +93 -0
- openocr/configs/rec/cdistnet/svtrv2_cdistnet.yml +139 -0
- openocr/configs/rec/cppd/svtr_base_cppd.yml +123 -0
- openocr/configs/rec/cppd/svtr_base_cppd_ch.yml +126 -0
- openocr/configs/rec/cppd/svtr_base_cppd_h8.yml +123 -0
- openocr/configs/rec/cppd/svtr_base_cppd_syn.yml +124 -0
- openocr/configs/rec/cppd/svtrv2_cppd.yml +150 -0
- openocr/configs/rec/dan/resnet45_fpn_dan.yml +98 -0
- openocr/configs/rec/dan/svtrv2_dan.yml +130 -0
- openocr/configs/rec/focalsvtr/focalsvtr_ctc.yml +137 -0
- openocr/configs/rec/gtc/svtrv2_lnconv_nrtr_gtc.yml +168 -0
- openocr/configs/rec/gtc/svtrv2_lnconv_smtr_gtc_long_infer.yml +151 -0
- openocr/configs/rec/gtc/svtrv2_lnconv_smtr_gtc_smtr_long.yml +150 -0
- openocr/configs/rec/gtc/svtrv2_lnconv_smtr_gtc_stream.yml +152 -0
- openocr/configs/rec/igtr/svtr_base_ds_igtr.yml +157 -0
- openocr/configs/rec/lister/focalsvtr_lister_wo_fem_maxratio12.yml +133 -0
- openocr/configs/rec/lister/svtrv2_lister_wo_fem_maxratio12.yml +138 -0
- openocr/configs/rec/lpv/svtr_base_lpv.yml +124 -0
- openocr/configs/rec/lpv/svtr_base_lpv_wo_glrm.yml +123 -0
- openocr/configs/rec/lpv/svtrv2_lpv.yml +147 -0
- openocr/configs/rec/lpv/svtrv2_lpv_wo_glrm.yml +146 -0
- openocr/configs/rec/maerec/vit_nrtr.yml +116 -0
- openocr/configs/rec/matrn/resnet45_trans_matrn.yml +95 -0
- openocr/configs/rec/matrn/svtrv2_matrn.yml +130 -0
- openocr/configs/rec/mgpstr/svtrv2_mgpstr_only_char.yml +140 -0
- openocr/configs/rec/mgpstr/vit_base_mgpstr_only_char.yml +111 -0
- openocr/configs/rec/mgpstr/vit_large_mgpstr_only_char.yml +110 -0
- openocr/configs/rec/mgpstr/vit_mgpstr.yml +110 -0
- openocr/configs/rec/mgpstr/vit_mgpstr_only_char.yml +110 -0
- openocr/configs/rec/moran/resnet31_lstm_moran.yml +92 -0
- openocr/configs/rec/nrtr/focalsvtr_nrtr_maxraio12.yml +145 -0
- openocr/configs/rec/nrtr/nrtr.yml +107 -0
- openocr/configs/rec/nrtr/svtr_base_nrtr.yml +118 -0
- openocr/configs/rec/nrtr/svtr_base_nrtr_syn.yml +119 -0
- openocr/configs/rec/nrtr/svtrv2_nrtr.yml +146 -0
- openocr/configs/rec/ote/svtr_base_h8_ote.yml +117 -0
- openocr/configs/rec/ote/svtr_base_ote.yml +116 -0
- openocr/configs/rec/parseq/focalsvtr_parseq_maxratio12.yml +140 -0
- openocr/configs/rec/parseq/svrtv2_parseq.yml +136 -0
- openocr/configs/rec/parseq/vit_parseq.yml +100 -0
- openocr/configs/rec/robustscanner/resnet31_robustscanner.yml +102 -0
- openocr/configs/rec/robustscanner/svtrv2_robustscanner.yml +134 -0
- openocr/configs/rec/sar/resnet31_lstm_sar.yml +94 -0
- openocr/configs/rec/sar/svtrv2_sar.yml +128 -0
- openocr/configs/rec/seed/resnet31_lstm_seed_tps_on.yml +96 -0
- openocr/configs/rec/smtr/focalsvtr_smtr.yml +150 -0
- openocr/configs/rec/smtr/focalsvtr_smtr_long.yml +133 -0
- openocr/configs/rec/smtr/svtrv2_smtr.yml +150 -0
- openocr/configs/rec/smtr/svtrv2_smtr_bi.yml +136 -0
- openocr/configs/rec/srn/resnet50_fpn_srn.yml +97 -0
- openocr/configs/rec/srn/svtrv2_srn.yml +131 -0
- openocr/configs/rec/svtrs/convnextv2_ctc.yml +105 -0
- openocr/configs/rec/svtrs/convnextv2_h8_ctc.yml +105 -0
- openocr/configs/rec/svtrs/convnextv2_h8_rctc.yml +106 -0
- openocr/configs/rec/svtrs/convnextv2_rctc.yml +106 -0
- openocr/configs/rec/svtrs/convnextv2_tiny_h8_ctc.yml +105 -0
- openocr/configs/rec/svtrs/convnextv2_tiny_h8_rctc.yml +106 -0
- openocr/configs/rec/svtrs/crnn_ctc.yml +99 -0
- openocr/configs/rec/svtrs/crnn_ctc_long.yml +116 -0
- openocr/configs/rec/svtrs/focalnet_base_ctc.yml +108 -0
- openocr/configs/rec/svtrs/focalnet_base_rctc.yml +109 -0
- openocr/configs/rec/svtrs/focalsvtr_ctc.yml +106 -0
- openocr/configs/rec/svtrs/focalsvtr_rctc.yml +107 -0
- openocr/configs/rec/svtrs/resnet45_trans_ctc.yml +103 -0
- openocr/configs/rec/svtrs/resnet45_trans_rctc.yml +104 -0
- openocr/configs/rec/svtrs/svtr_base_ctc.yml +110 -0
- openocr/configs/rec/svtrs/svtr_base_rctc.yml +111 -0
- openocr/configs/rec/svtrs/svtrnet_ctc_syn.yml +111 -0
- openocr/configs/rec/svtrs/vit_ctc.yml +103 -0
- openocr/configs/rec/svtrs/vit_rctc.yml +103 -0
- openocr/configs/rec/svtrv2/repsvtr_ch.yml +121 -0
- openocr/configs/rec/svtrv2/svtrv2_ch.yml +133 -0
- openocr/configs/rec/svtrv2/svtrv2_ctc.yml +136 -0
- openocr/configs/rec/svtrv2/svtrv2_rctc.yml +135 -0
- openocr/configs/rec/svtrv2/svtrv2_small_rctc.yml +135 -0
- openocr/configs/rec/svtrv2/svtrv2_smtr_gtc_rctc.yml +162 -0
- openocr/configs/rec/svtrv2/svtrv2_smtr_gtc_rctc_ch.yml +153 -0
- openocr/configs/rec/svtrv2/svtrv2_tiny_rctc.yml +135 -0
- openocr/configs/rec/visionlan/resnet45_trans_visionlan_LA.yml +103 -0
- openocr/configs/rec/visionlan/resnet45_trans_visionlan_LF_1.yml +102 -0
- openocr/configs/rec/visionlan/resnet45_trans_visionlan_LF_2.yml +103 -0
- openocr/configs/rec/visionlan/svtrv2_visionlan_LA.yml +112 -0
- openocr/configs/rec/visionlan/svtrv2_visionlan_LF_1.yml +111 -0
- openocr/configs/rec/visionlan/svtrv2_visionlan_LF_2.yml +112 -0
- openocr/demo_gradio.py +128 -0
- openocr/opendet/modeling/__init__.py +11 -0
- openocr/opendet/modeling/backbones/__init__.py +14 -0
- openocr/opendet/modeling/backbones/repvit.py +340 -0
- openocr/opendet/modeling/base_detector.py +69 -0
- openocr/opendet/modeling/heads/__init__.py +14 -0
- openocr/opendet/modeling/heads/db_head.py +73 -0
- openocr/opendet/modeling/necks/__init__.py +14 -0
- openocr/opendet/modeling/necks/db_fpn.py +609 -0
- openocr/opendet/postprocess/__init__.py +18 -0
- openocr/opendet/postprocess/db_postprocess.py +273 -0
- openocr/opendet/preprocess/__init__.py +154 -0
- openocr/opendet/preprocess/crop_resize.py +121 -0
- openocr/opendet/preprocess/db_resize_for_test.py +135 -0
- openocr/openrec/losses/__init__.py +62 -0
- openocr/openrec/losses/abinet_loss.py +42 -0
- openocr/openrec/losses/ar_loss.py +23 -0
- openocr/openrec/losses/cam_loss.py +48 -0
- openocr/openrec/losses/cdistnet_loss.py +34 -0
- openocr/openrec/losses/ce_loss.py +68 -0
- openocr/openrec/losses/cppd_loss.py +77 -0
- openocr/openrec/losses/ctc_loss.py +33 -0
- openocr/openrec/losses/igtr_loss.py +12 -0
- openocr/openrec/losses/lister_loss.py +14 -0
- openocr/openrec/losses/lpv_loss.py +30 -0
- openocr/openrec/losses/mgp_loss.py +34 -0
- openocr/openrec/losses/parseq_loss.py +12 -0
- openocr/openrec/losses/robustscanner_loss.py +20 -0
- openocr/openrec/losses/seed_loss.py +46 -0
- openocr/openrec/losses/smtr_loss.py +12 -0
- openocr/openrec/losses/srn_loss.py +40 -0
- openocr/openrec/losses/visionlan_loss.py +58 -0
- openocr/openrec/metrics/__init__.py +19 -0
- openocr/openrec/metrics/rec_metric.py +270 -0
- openocr/openrec/metrics/rec_metric_gtc.py +58 -0
- openocr/openrec/metrics/rec_metric_long.py +142 -0
- openocr/openrec/metrics/rec_metric_mgp.py +93 -0
- openocr/openrec/modeling/__init__.py +11 -0
- openocr/openrec/modeling/base_recognizer.py +69 -0
- openocr/openrec/modeling/common.py +238 -0
- openocr/openrec/modeling/decoders/__init__.py +109 -0
- openocr/openrec/modeling/decoders/abinet_decoder.py +283 -0
- openocr/openrec/modeling/decoders/aster_decoder.py +170 -0
- openocr/openrec/modeling/decoders/bus_decoder.py +133 -0
- openocr/openrec/modeling/decoders/cam_decoder.py +43 -0
- openocr/openrec/modeling/decoders/cdistnet_decoder.py +334 -0
- openocr/openrec/modeling/decoders/cppd_decoder.py +393 -0
- openocr/openrec/modeling/decoders/ctc_decoder.py +203 -0
- openocr/openrec/modeling/decoders/dan_decoder.py +203 -0
- openocr/openrec/modeling/decoders/igtr_decoder.py +815 -0
- openocr/openrec/modeling/decoders/lister_decoder.py +535 -0
- openocr/openrec/modeling/decoders/lpv_decoder.py +119 -0
- openocr/openrec/modeling/decoders/matrn_decoder.py +236 -0
- openocr/openrec/modeling/decoders/mgp_decoder.py +99 -0
- openocr/openrec/modeling/decoders/nrtr_decoder.py +439 -0
- openocr/openrec/modeling/decoders/ote_decoder.py +205 -0
- openocr/openrec/modeling/decoders/parseq_decoder.py +504 -0
- openocr/openrec/modeling/decoders/rctc_decoder.py +70 -0
- openocr/openrec/modeling/decoders/robustscanner_decoder.py +749 -0
- openocr/openrec/modeling/decoders/sar_decoder.py +236 -0
- openocr/openrec/modeling/decoders/smtr_decoder.py +621 -0
- openocr/openrec/modeling/decoders/smtr_decoder_nattn.py +521 -0
- openocr/openrec/modeling/decoders/srn_decoder.py +283 -0
- openocr/openrec/modeling/decoders/visionlan_decoder.py +321 -0
- openocr/openrec/modeling/encoders/__init__.py +39 -0
- openocr/openrec/modeling/encoders/autostr_encoder.py +327 -0
- openocr/openrec/modeling/encoders/cam_encoder.py +760 -0
- openocr/openrec/modeling/encoders/convnextv2.py +213 -0
- openocr/openrec/modeling/encoders/focalsvtr.py +631 -0
- openocr/openrec/modeling/encoders/nrtr_encoder.py +28 -0
- openocr/openrec/modeling/encoders/rec_hgnet.py +346 -0
- openocr/openrec/modeling/encoders/rec_lcnetv3.py +488 -0
- openocr/openrec/modeling/encoders/rec_mobilenet_v3.py +132 -0
- openocr/openrec/modeling/encoders/rec_mv1_enhance.py +254 -0
- openocr/openrec/modeling/encoders/rec_nrtr_mtb.py +37 -0
- openocr/openrec/modeling/encoders/rec_resnet_31.py +213 -0
- openocr/openrec/modeling/encoders/rec_resnet_45.py +183 -0
- openocr/openrec/modeling/encoders/rec_resnet_fpn.py +216 -0
- openocr/openrec/modeling/encoders/rec_resnet_vd.py +252 -0
- openocr/openrec/modeling/encoders/repvit.py +338 -0
- openocr/openrec/modeling/encoders/resnet31_rnn.py +123 -0
- openocr/openrec/modeling/encoders/svtrnet.py +574 -0
- openocr/openrec/modeling/encoders/svtrnet2dpos.py +616 -0
- openocr/openrec/modeling/encoders/svtrv2.py +470 -0
- openocr/openrec/modeling/encoders/svtrv2_lnconv.py +503 -0
- openocr/openrec/modeling/encoders/svtrv2_lnconv_two33.py +517 -0
- openocr/openrec/modeling/encoders/vit.py +120 -0
- openocr/openrec/modeling/transforms/__init__.py +15 -0
- openocr/openrec/modeling/transforms/aster_tps.py +262 -0
- openocr/openrec/modeling/transforms/moran.py +136 -0
- openocr/openrec/modeling/transforms/tps.py +246 -0
- openocr/openrec/optimizer/__init__.py +73 -0
- openocr/openrec/optimizer/lr.py +227 -0
- openocr/openrec/postprocess/__init__.py +72 -0
- openocr/openrec/postprocess/abinet_postprocess.py +37 -0
- openocr/openrec/postprocess/ar_postprocess.py +63 -0
- openocr/openrec/postprocess/ce_postprocess.py +43 -0
- openocr/openrec/postprocess/char_postprocess.py +108 -0
- openocr/openrec/postprocess/cppd_postprocess.py +42 -0
- openocr/openrec/postprocess/ctc_postprocess.py +119 -0
- openocr/openrec/postprocess/igtr_postprocess.py +100 -0
- openocr/openrec/postprocess/lister_postprocess.py +59 -0
- openocr/openrec/postprocess/mgp_postprocess.py +143 -0
- openocr/openrec/postprocess/nrtr_postprocess.py +75 -0
- openocr/openrec/postprocess/smtr_postprocess.py +73 -0
- openocr/openrec/postprocess/srn_postprocess.py +80 -0
- openocr/openrec/postprocess/visionlan_postprocess.py +81 -0
- openocr/openrec/preprocess/__init__.py +173 -0
- openocr/openrec/preprocess/abinet_aug.py +473 -0
- openocr/openrec/preprocess/abinet_label_encode.py +36 -0
- openocr/openrec/preprocess/ar_label_encode.py +36 -0
- openocr/openrec/preprocess/auto_augment.py +1012 -0
- openocr/openrec/preprocess/cam_label_encode.py +141 -0
- openocr/openrec/preprocess/ce_label_encode.py +116 -0
- openocr/openrec/preprocess/char_label_encode.py +36 -0
- openocr/openrec/preprocess/cppd_label_encode.py +173 -0
- openocr/openrec/preprocess/ctc_label_encode.py +124 -0
- openocr/openrec/preprocess/ep_label_encode.py +38 -0
- openocr/openrec/preprocess/igtr_label_encode.py +360 -0
- openocr/openrec/preprocess/mgp_label_encode.py +95 -0
- openocr/openrec/preprocess/parseq_aug.py +150 -0
- openocr/openrec/preprocess/rec_aug.py +211 -0
- openocr/openrec/preprocess/resize.py +534 -0
- openocr/openrec/preprocess/smtr_label_encode.py +125 -0
- openocr/openrec/preprocess/srn_label_encode.py +37 -0
- openocr/openrec/preprocess/visionlan_label_encode.py +67 -0
- openocr/tools/create_lmdb_dataset.py +118 -0
- openocr/tools/data/__init__.py +94 -0
- openocr/tools/data/collate_fn.py +100 -0
- openocr/tools/data/lmdb_dataset.py +142 -0
- openocr/tools/data/lmdb_dataset_test.py +166 -0
- openocr/tools/data/multi_scale_sampler.py +177 -0
- openocr/tools/data/ratio_dataset.py +217 -0
- openocr/tools/data/ratio_dataset_test.py +273 -0
- openocr/tools/data/ratio_dataset_tvresize.py +213 -0
- openocr/tools/data/ratio_dataset_tvresize_test.py +276 -0
- openocr/tools/data/ratio_sampler.py +190 -0
- openocr/tools/data/simple_dataset.py +263 -0
- openocr/tools/data/strlmdb_dataset.py +143 -0
- openocr/tools/engine/__init__.py +5 -0
- openocr/tools/engine/config.py +158 -0
- openocr/tools/engine/trainer.py +621 -0
- openocr/tools/eval_rec.py +41 -0
- openocr/tools/eval_rec_all_ch.py +184 -0
- openocr/tools/eval_rec_all_en.py +206 -0
- openocr/tools/eval_rec_all_long.py +119 -0
- openocr/tools/eval_rec_all_long_simple.py +122 -0
- openocr/tools/export_rec.py +118 -0
- openocr/tools/infer/onnx_engine.py +65 -0
- openocr/tools/infer/predict_rec.py +140 -0
- openocr/tools/infer/utility.py +234 -0
- openocr/tools/infer_det.py +449 -0
- openocr/tools/infer_e2e.py +462 -0
- openocr/tools/infer_e2e_parallel.py +184 -0
- openocr/tools/infer_rec.py +371 -0
- openocr/tools/train_rec.py +37 -0
- openocr/tools/utility.py +45 -0
- openocr/tools/utils/EN_symbol_dict.txt +94 -0
- openocr/tools/utils/__init__.py +0 -0
- openocr/tools/utils/ckpt.py +87 -0
- openocr/tools/utils/dict/ar_dict.txt +117 -0
- openocr/tools/utils/dict/arabic_dict.txt +161 -0
- openocr/tools/utils/dict/be_dict.txt +145 -0
- openocr/tools/utils/dict/bg_dict.txt +140 -0
- openocr/tools/utils/dict/chinese_cht_dict.txt +8421 -0
- openocr/tools/utils/dict/cyrillic_dict.txt +163 -0
- openocr/tools/utils/dict/devanagari_dict.txt +167 -0
- openocr/tools/utils/dict/en_dict.txt +63 -0
- openocr/tools/utils/dict/fa_dict.txt +136 -0
- openocr/tools/utils/dict/french_dict.txt +136 -0
- openocr/tools/utils/dict/german_dict.txt +143 -0
- openocr/tools/utils/dict/hi_dict.txt +162 -0
- openocr/tools/utils/dict/it_dict.txt +118 -0
- openocr/tools/utils/dict/japan_dict.txt +4399 -0
- openocr/tools/utils/dict/ka_dict.txt +153 -0
- openocr/tools/utils/dict/kie_dict/xfund_class_list.txt +4 -0
- openocr/tools/utils/dict/korean_dict.txt +3688 -0
- openocr/tools/utils/dict/latex_symbol_dict.txt +111 -0
- openocr/tools/utils/dict/latin_dict.txt +185 -0
- openocr/tools/utils/dict/layout_dict/layout_cdla_dict.txt +10 -0
- openocr/tools/utils/dict/layout_dict/layout_publaynet_dict.txt +5 -0
- openocr/tools/utils/dict/layout_dict/layout_table_dict.txt +1 -0
- openocr/tools/utils/dict/mr_dict.txt +153 -0
- openocr/tools/utils/dict/ne_dict.txt +153 -0
- openocr/tools/utils/dict/oc_dict.txt +96 -0
- openocr/tools/utils/dict/pu_dict.txt +130 -0
- openocr/tools/utils/dict/rs_dict.txt +91 -0
- openocr/tools/utils/dict/rsc_dict.txt +134 -0
- openocr/tools/utils/dict/ru_dict.txt +125 -0
- openocr/tools/utils/dict/spin_dict.txt +68 -0
- openocr/tools/utils/dict/ta_dict.txt +128 -0
- openocr/tools/utils/dict/table_dict.txt +277 -0
- openocr/tools/utils/dict/table_master_structure_dict.txt +39 -0
- openocr/tools/utils/dict/table_structure_dict.txt +28 -0
- openocr/tools/utils/dict/table_structure_dict_ch.txt +48 -0
- openocr/tools/utils/dict/te_dict.txt +151 -0
- openocr/tools/utils/dict/ug_dict.txt +114 -0
- openocr/tools/utils/dict/uk_dict.txt +142 -0
- openocr/tools/utils/dict/ur_dict.txt +137 -0
- openocr/tools/utils/dict/xi_dict.txt +110 -0
- openocr/tools/utils/dict90.txt +90 -0
- openocr/tools/utils/e2e_metric/Deteval.py +802 -0
- openocr/tools/utils/e2e_metric/polygon_fast.py +70 -0
- openocr/tools/utils/e2e_utils/extract_batchsize.py +86 -0
- openocr/tools/utils/e2e_utils/extract_textpoint_fast.py +479 -0
- openocr/tools/utils/e2e_utils/extract_textpoint_slow.py +582 -0
- openocr/tools/utils/e2e_utils/pgnet_pp_utils.py +159 -0
- openocr/tools/utils/e2e_utils/visual.py +152 -0
- openocr/tools/utils/en_dict.txt +95 -0
- openocr/tools/utils/gen_label.py +68 -0
- openocr/tools/utils/ic15_dict.txt +36 -0
- openocr/tools/utils/logging.py +56 -0
- openocr/tools/utils/poly_nms.py +132 -0
- openocr/tools/utils/ppocr_keys_v1.txt +6623 -0
- openocr/tools/utils/stats.py +58 -0
- openocr/tools/utils/utility.py +165 -0
- openocr/tools/utils/visual.py +117 -0
- openocr_python-0.0.2.dist-info/LICENCE +201 -0
- openocr_python-0.0.2.dist-info/METADATA +98 -0
- openocr_python-0.0.2.dist-info/RECORD +323 -0
- openocr_python-0.0.2.dist-info/WHEEL +5 -0
- openocr_python-0.0.2.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
|
|
4
|
+
__dir__ = os.path.dirname(os.path.abspath(__file__))
|
|
5
|
+
sys.path.append(__dir__)
|
|
6
|
+
sys.path.insert(0, os.path.abspath(os.path.join(__dir__, '..')))
|
|
7
|
+
|
|
8
|
+
import torch
|
|
9
|
+
|
|
10
|
+
from openrec.modeling import build_model
|
|
11
|
+
from openrec.postprocess import build_post_process
|
|
12
|
+
from tools.engine import Config
|
|
13
|
+
from tools.infer_rec import build_rec_process
|
|
14
|
+
from tools.utility import ArgsParser
|
|
15
|
+
from tools.utils.ckpt import load_ckpt
|
|
16
|
+
from tools.utils.logging import get_logger
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def to_onnx(model, dummy_input, dynamic_axes, sava_path='model.onnx'):
    """Export *model* to an ONNX file.

    Args:
        model: torch.nn.Module to export; it is moved to CPU first.
        dummy_input: example input tensor used to trace the graph.
        dynamic_axes: iterable of axis indices (ints) to mark as dynamic
            on both the input and the output tensors.
        sava_path: destination .onnx file path. (The misspelled parameter
            name is kept for backward compatibility with keyword callers.)
    """
    # Human-readable labels for each dynamic axis position.
    # Fixed typo: 'int_height' -> 'in_height' (pairs with 'in_width').
    input_axis_name = ['batch_size', 'channel', 'in_width', 'in_height']
    output_axis_name = ['batch_size', 'channel', 'out_width', 'out_height']
    torch.onnx.export(
        model.to('cpu'),
        dummy_input,
        sava_path,
        input_names=['input'],
        output_names=['output'],  # the model's output names
        dynamic_axes={
            'input': {axis: input_axis_name[axis]
                      for axis in dynamic_axes},
            'output': {axis: output_axis_name[axis]
                       for axis in dynamic_axes},
        },
    )
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def export_single_model(model: torch.nn.Module, _cfg, export_dir,
                        export_config, logger, type):
    """Export one recognizer to TorchScript ('script') or ONNX ('onnx').

    Args:
        model: the recognizer; re-parameterizable layers are fused in place
            before export.
        _cfg: full config dict; its PostProcess and rebuilt Transforms
            sections are saved next to the exported model.
        export_dir: output directory (created if missing).
        export_config: dict providing 'export_shape' and, optionally,
            'dynamic_axes' for the ONNX export.
        logger: logger used for the final status message.
        type: export format, 'script' or 'onnx'.

    Raises:
        NotImplementedError: for any other *type* value.
    """
    # Fuse re-parameterizable branches before export. Defaulting the flag
    # to False makes layers that expose `rep` but not `is_repped` fuse
    # instead of raising AttributeError (original used a bare getattr).
    for layer in model.modules():
        if hasattr(layer, 'rep') and not getattr(layer, 'is_repped', False):
            layer.rep()
    os.makedirs(export_dir, exist_ok=True)

    export_cfg = {'PostProcess': _cfg['PostProcess']}
    export_cfg['Transforms'] = build_rec_process(_cfg)

    # NOTE(review): `cfg` is the module-level Config created in __main__,
    # not a local name — this function only works when the file is run as
    # a script; confirm before calling it as a library function.
    cfg.save(os.path.join(export_dir, 'config.yaml'), export_cfg)

    dummy_input = torch.randn(*export_config['export_shape'], device='cpu')
    if type == 'script':
        save_path = os.path.join(export_dir, 'model.pt')
        trace_model = torch.jit.trace(model, dummy_input, strict=False)
        torch.jit.save(trace_model, save_path)
    elif type == 'onnx':
        save_path = os.path.join(export_dir, 'model.onnx')
        to_onnx(model, dummy_input, export_config.get('dynamic_axes', []),
                save_path)
    else:
        raise NotImplementedError
    logger.info(f'finish export model to {save_path}')
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def main(cfg, type):
    """Build the recognizer from *cfg*, load its checkpoint, and export it.

    Args:
        cfg: tools.engine.Config wrapping the parsed YAML config.
        type: export format forwarded to export_single_model
            ('script' or 'onnx').
    """
    _cfg = cfg.cfg
    logger = get_logger()
    global_config = _cfg['Global']
    export_config = _cfg['Export']
    # Build the post-process first: the size of its character set decides
    # the decoder's output dimension.
    post_process_class = build_post_process(_cfg['PostProcess'])
    char_num = len(getattr(post_process_class, 'character'))
    # Write through the underlying dict (_cfg) for consistency with every
    # other access here; the original indexed the Config wrapper directly,
    # which build_model (reading _cfg) would not necessarily see.
    _cfg['Architecture']['Decoder']['out_channels'] = char_num
    model = build_model(_cfg['Architecture'])

    load_ckpt(model, _cfg)
    model.eval()

    export_dir = export_config.get('export_dir', '')
    if not export_dir:
        export_dir = os.path.join(global_config.get('output_dir', 'output'),
                                  'export')

    if _cfg['Architecture']['algorithm'] in ['Distillation'
                                             ]:  # distillation model
        # Each sub-model is exported into its own sub-directory; the
        # post-process name is rewritten to the (shared) base class.
        _cfg['PostProcess'][
            'name'] = post_process_class.__class__.__base__.__name__
        for model_name in model.model_list:
            sub_model_save_path = os.path.join(export_dir, model_name)
            export_single_model(
                model.model_list[model_name],
                _cfg,
                sub_model_save_path,
                export_config,
                logger,
                type,
            )
    else:
        export_single_model(model, _cfg, export_dir, export_config, logger,
                            type)
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def parse_args():
    """Parse command-line options, adding the export-format flag."""
    arg_parser = ArgsParser()
    arg_parser.add_argument('--type',
                            type=str,
                            default='onnx',
                            help='type of export')
    return arg_parser.parse_args()
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
if __name__ == '__main__':
    # Parse CLI flags, load the YAML config pointed to by --config, then
    # fold the remaining CLI options (including -o key=value overrides,
    # popped out as 'opt') back into the config before exporting.
    FLAGS = parse_args()
    cfg = Config(FLAGS.config)
    FLAGS = vars(FLAGS)
    opt = FLAGS.pop('opt')
    cfg.merge_dict(FLAGS)
    cfg.merge_dict(opt)
    main(cfg, FLAGS['type'])
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
import onnxruntime
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class ONNXEngine:
    """Thin wrapper around an onnxruntime InferenceSession."""

    def __init__(self, onnx_path, use_gpu):
        """Create an inference session for the model at *onnx_path*.

        Args:
            onnx_path: path to the .onnx model file.
            use_gpu: when True, prefer TensorRT/CUDA execution providers
                with CPU as the fallback.

        Raises:
            Exception: if *onnx_path* does not exist.
        """
        if not os.path.exists(onnx_path):
            raise Exception(f'{onnx_path} does not exist')

        providers = ['CPUExecutionProvider']
        if use_gpu:
            # BUG FIX: the provider list was wrapped in a one-element
            # tuple by a stray trailing comma (`([...], )`), which
            # onnxruntime rejects; pass the list itself.
            providers = [
                'TensorrtExecutionProvider',
                'CUDAExecutionProvider',
                'CPUExecutionProvider',
            ]
        self.onnx_session = onnxruntime.InferenceSession(onnx_path,
                                                         providers=providers)
        self.input_name = self.get_input_name(self.onnx_session)
        self.output_name = self.get_output_name(self.onnx_session)

    def get_output_name(self, onnx_session):
        """Return the names of all outputs of *onnx_session*."""
        output_name = []
        for node in onnx_session.get_outputs():
            output_name.append(node.name)
        return output_name

    def get_input_name(self, onnx_session):
        """Return the names of all inputs of *onnx_session*."""
        input_name = []
        for node in onnx_session.get_inputs():
            input_name.append(node.name)
        return input_name

    def get_input_feed(self, input_name, image_numpy):
        """Build a feed dict mapping every name in *input_name* to
        *image_numpy* (the same array is fed to every input)."""
        input_feed = {}
        for name in input_name:
            input_feed[name] = image_numpy
        return input_feed

    def run(self, image_numpy):
        """Run inference on *image_numpy* and return the raw output list.

        The input's dtype must match what the model expects.
        """
        input_feed = self.get_input_feed(self.input_name, image_numpy)
        result = self.onnx_session.run(self.output_name, input_feed=input_feed)
        return result
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
|
|
4
|
+
__dir__ = os.path.dirname(os.path.abspath(__file__))
|
|
5
|
+
sys.path.append(__dir__)
|
|
6
|
+
sys.path.insert(0, os.path.abspath(os.path.join(__dir__, '../..')))
|
|
7
|
+
|
|
8
|
+
import math
|
|
9
|
+
import time
|
|
10
|
+
|
|
11
|
+
import cv2
|
|
12
|
+
import numpy as np
|
|
13
|
+
|
|
14
|
+
from openrec.postprocess import build_post_process
|
|
15
|
+
from openrec.preprocess import create_operators, transform
|
|
16
|
+
from tools.engine import Config
|
|
17
|
+
from tools.infer.onnx_engine import ONNXEngine
|
|
18
|
+
from tools.infer.utility import check_gpu, parse_args
|
|
19
|
+
from tools.utils.logging import get_logger
|
|
20
|
+
from tools.utils.utility import check_and_read, get_image_file_list
|
|
21
|
+
|
|
22
|
+
logger = get_logger()
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class TextRecognizer(ONNXEngine):
    """ONNX text recognizer: preprocess, batch by aspect ratio, run,
    post-process."""

    def __init__(self, args):
        """Load model.onnx and config.yaml from args.rec_model_dir.

        Raises:
            Exception: if args.rec_model_dir is unset or does not exist.
        """
        if args.rec_model_dir is None or not os.path.exists(
                args.rec_model_dir):
            raise Exception(
                f'args.rec_model_dir is set to {args.rec_model_dir}, but it is not exists'
            )

        onnx_path = os.path.join(args.rec_model_dir, 'model.onnx')
        config_path = os.path.join(args.rec_model_dir, 'config.yaml')
        super(TextRecognizer, self).__init__(onnx_path, args.use_gpu)

        # e.g. '3, 48, 320' -> [3, 48, 320] (C, H, W).
        self.rec_image_shape = [
            int(v) for v in args.rec_image_shape.split(',')
        ]
        self.rec_batch_num = args.rec_batch_num
        self.rec_algorithm = args.rec_algorithm

        cfg = Config(config_path).cfg
        # The first transform is skipped — presumably a training-only
        # decode/label op; TODO confirm against the exported config.
        self.ops = create_operators(cfg['Transforms'][1:])
        self.postprocess_op = build_post_process(cfg['PostProcess'])

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize *img* (H, W, C) preserving aspect ratio, normalize to
        [-1, 1] float32 CHW, and zero-pad on the right to the batch width.
        """
        imgC, imgH, imgW = self.rec_image_shape
        assert imgC == img.shape[2]
        # The batch width is driven by the widest image in the batch.
        imgW = int((imgH * max_wh_ratio))
        h, w = img.shape[:2]
        ratio = w / float(h)
        if math.ceil(imgH * ratio) > imgW:
            resized_w = imgW
        else:
            resized_w = int(math.ceil(imgH * ratio))
        resized_image = cv2.resize(img, (resized_w, imgH))
        resized_image = resized_image.astype('float32')
        # HWC uint8 [0, 255] -> CHW float32 [-1, 1].
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
        padding_im[:, :, 0:resized_w] = resized_image
        return padding_im

    def __call__(self, img_list):
        """Recognize every image in *img_list*.

        Returns:
            (results, elapsed_seconds); results[i] corresponds to
            img_list[i] — the internal ratio sort is undone before return.
        """
        img_num = len(img_list)
        # Calculate the aspect ratio of all text bars.
        width_list = []
        for img in img_list:
            width_list.append(img.shape[1] / float(img.shape[0]))
        # Sorting by ratio groups similar widths into the same batch,
        # which speeds up the recognition process.
        indices = np.argsort(np.array(width_list))
        # One independent placeholder per image. The original used
        # `[['', 0.0]] * img_num`, aliasing a single list across all
        # slots — harmless here (slots are reassigned wholesale) but a
        # latent in-place-mutation hazard.
        rec_res = [['', 0.0] for _ in range(img_num)]
        batch_num = self.rec_batch_num
        st = time.time()
        for beg_img_no in range(0, img_num, batch_num):
            end_img_no = min(img_num, beg_img_no + batch_num)
            norm_img_batch = []
            imgC, imgH, imgW = self.rec_image_shape[:3]
            max_wh_ratio = imgW / imgH
            # max_wh_ratio = 0
            for ino in range(beg_img_no, end_img_no):
                h, w = img_list[indices[ino]].shape[0:2]
                wh_ratio = w * 1.0 / h
                max_wh_ratio = max(max_wh_ratio, wh_ratio)
            for ino in range(beg_img_no, end_img_no):
                if self.rec_algorithm == 'nrtr':
                    norm_img = transform({'image': img_list[indices[ino]]},
                                         self.ops)[0]
                else:
                    norm_img = self.resize_norm_img(img_list[indices[ino]],
                                                    max_wh_ratio)
                norm_img = norm_img[np.newaxis, :]
                norm_img_batch.append(norm_img)
            norm_img_batch = np.concatenate(norm_img_batch)
            norm_img_batch = norm_img_batch.copy()

            preds = self.run(norm_img_batch)

            # Single-output models come back as a 1-element list; unwrap.
            if len(preds) == 1:
                preds = preds[0]

            rec_result = self.postprocess_op({'res': preds})
            for rno in range(len(rec_result)):
                # Undo the ratio sort so results line up with img_list.
                rec_res[indices[beg_img_no + rno]] = rec_result[rno]
        return rec_res, time.time() - st
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def main(args):
    """Run ONNX text recognition over every image found in args.image_dir
    and log one result line per successfully loaded image."""
    args.use_gpu = check_gpu(args.use_gpu)

    files = get_image_file_list(args.image_dir)
    recognizer = TextRecognizer(args)
    loaded_files = []
    loaded_imgs = []

    # Warm the session up twice with random data before real inference.
    if args.warmup:
        dummy = np.random.uniform(0, 255, [48, 320, 3]).astype(np.uint8)
        for _ in range(2):
            recognizer([dummy] * int(args.rec_batch_num))

    for path in files:
        image, ok, _ = check_and_read(path)
        if not ok:
            image = cv2.imread(path)
            if image is None:
                logger.info(f'error in loading image:{path}')
                continue
        loaded_files.append(path)
        loaded_imgs.append(image)
    results, _ = recognizer(loaded_imgs)
    for idx, result in enumerate(results):
        logger.info(f'result of {loaded_files[idx]}:{result}')
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
if __name__ == '__main__':
|
|
140
|
+
main(parse_args())
|
|
@@ -0,0 +1,234 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import math
|
|
3
|
+
|
|
4
|
+
import cv2
|
|
5
|
+
import numpy as np
|
|
6
|
+
import torch
|
|
7
|
+
import PIL
|
|
8
|
+
from PIL import Image, ImageDraw, ImageFont
|
|
9
|
+
import random
|
|
10
|
+
|
|
11
|
+
def str2bool(v):
    """Interpret common truthy strings; anything else is False."""
    truthy = {'true', 'yes', 't', 'y', '1'}
    return v.lower() in truthy
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def str2int_tuple(v):
    """Parse a comma-separated string of integers into a tuple."""
    return tuple(int(piece.strip()) for piece in v.split(','))
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def init_args():
    """Build the argument parser shared by the OCR inference CLI tools.

    Returns:
        argparse.ArgumentParser: parser with all detection, recognition,
        classification, visualization, and multi-process options registered.
    """
    parser = argparse.ArgumentParser()
    # params for prediction engine
    parser.add_argument('--use_gpu', type=str2bool, default=False)

    # params for text detector
    parser.add_argument('--image_dir', type=str)
    parser.add_argument('--det_algorithm', type=str, default='DB')
    parser.add_argument('--det_model_dir', type=str)
    parser.add_argument('--det_limit_side_len', type=float, default=960)
    parser.add_argument('--det_limit_type', type=str, default='max')
    parser.add_argument('--det_box_type', type=str, default='quad')

    # DB params
    parser.add_argument('--det_db_thresh', type=float, default=0.3)
    parser.add_argument('--det_db_box_thresh', type=float, default=0.6)
    parser.add_argument('--det_db_unclip_ratio', type=float, default=1.5)
    parser.add_argument('--max_batch_size', type=int, default=10)
    parser.add_argument('--use_dilation', type=str2bool, default=False)
    parser.add_argument('--det_db_score_mode', type=str, default='fast')

    # params for text recognizer
    parser.add_argument('--rec_algorithm', type=str, default='SVTR_LCNet')
    parser.add_argument('--rec_model_dir', type=str)
    parser.add_argument('--rec_image_inverse', type=str2bool, default=True)
    parser.add_argument('--rec_image_shape', type=str, default='3, 48, 320')
    parser.add_argument('--rec_batch_num', type=int, default=6)
    parser.add_argument('--max_text_length', type=int, default=25)
    parser.add_argument('--vis_font_path',
                        type=str,
                        default='./doc/fonts/simfang.ttf')
    parser.add_argument('--drop_score', type=float, default=0.5)

    # params for text classifier
    parser.add_argument('--use_angle_cls', type=str2bool, default=False)
    parser.add_argument('--cls_model_dir', type=str)
    parser.add_argument('--cls_image_shape', type=str, default='3, 48, 192')
    # BUGFIX: the original used `type=list`, which splits a CLI value into
    # individual characters ('0,180' -> ['0', ',', '1', '8', '0']). Parse a
    # comma-separated list instead; the default value is unchanged.
    parser.add_argument('--label_list',
                        type=lambda v: [s.strip() for s in v.split(',')],
                        default=['0', '180'])
    parser.add_argument('--cls_batch_num', type=int, default=6)
    parser.add_argument('--cls_thresh', type=float, default=0.9)

    parser.add_argument('--warmup', type=str2bool, default=False)

    # output / visualization
    parser.add_argument('--output', type=str, default='./inference_results')
    parser.add_argument('--save_crop_res', type=str2bool, default=False)
    parser.add_argument('--crop_res_save_dir', type=str, default='./output')

    # multi-process
    parser.add_argument('--use_mp', type=str2bool, default=False)
    parser.add_argument('--total_process_num', type=int, default=1)
    parser.add_argument('--process_id', type=int, default=0)

    parser.add_argument('--show_log', type=str2bool, default=True)
    return parser
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def parse_args():
    """Parse command-line arguments using the shared parser from init_args()."""
    return init_args().parse_args()
|
|
79
|
+
|
|
80
|
+
def create_font(txt, sz, font_path="./doc/fonts/simfang.ttf"):
    """Load a TrueType font sized so *txt* fits inside a box *sz* of (width, height)."""
    size = int(sz[1] * 0.99)
    font = ImageFont.truetype(font_path, size, encoding="utf-8")
    # Pillow >= 10 removed FreeTypeFont.getsize(); use getlength() there.
    if int(PIL.__version__.split(".")[0]) < 10:
        text_len = font.getsize(txt)[0]
    else:
        text_len = font.getlength(txt)

    # Shrink proportionally when the rendered text would overflow the box width.
    if text_len > sz[0]:
        size = int(size * sz[0] / text_len)
        font = ImageFont.truetype(font_path, size, encoding="utf-8")
    return font
|
|
92
|
+
|
|
93
|
+
def draw_box_txt_fine(img_size, box, txt, font_path="./doc/fonts/simfang.ttf"):
    """Render *txt* into a white quad the size of *box* and warp it onto a
    canvas of *img_size* (w, h); clearly vertical boxes are drawn rotated."""

    def _edge_len(p, q):
        # Euclidean length of the box edge p -> q, truncated to int.
        return int(math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2))

    box_height = _edge_len(box[0], box[3])
    box_width = _edge_len(box[0], box[1])

    if box_height > 2 * box_width and box_height > 30:
        # Vertical text: draw sideways, then rotate into place.
        canvas = Image.new("RGB", (box_height, box_width), (255, 255, 255))
        painter = ImageDraw.Draw(canvas)
        if txt:
            font = create_font(txt, (box_height, box_width), font_path)
            painter.text([0, 0], txt, fill=(0, 0, 0), font=font)
        canvas = canvas.transpose(Image.ROTATE_270)
    else:
        canvas = Image.new("RGB", (box_width, box_height), (255, 255, 255))
        painter = ImageDraw.Draw(canvas)
        if txt:
            font = create_font(txt, (box_width, box_height), font_path)
            painter.text([0, 0], txt, fill=(0, 0, 0), font=font)

    src_pts = np.float32(
        [[0, 0], [box_width, 0], [box_width, box_height], [0, box_height]])
    dst_pts = np.array(box, dtype=np.float32)
    matrix = cv2.getPerspectiveTransform(src_pts, dst_pts)

    warped = cv2.warpPerspective(
        np.array(canvas, dtype=np.uint8),
        matrix,
        img_size,
        flags=cv2.INTER_NEAREST,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(255, 255, 255),
    )
    return warped
|
|
131
|
+
|
|
132
|
+
def draw_ocr_box_txt(
    image,
    boxes,
    txts=None,
    scores=None,
    drop_score=0.5,
    font_path="./doc/fonts/simfang.ttf",
):
    """Visualize OCR results: the input image with blended colored boxes on the
    left and the recognized text rendered in matching boxes on the right.
    Returns the side-by-side composite as a numpy array."""
    h, w = image.height, image.width
    img_left = image.copy()
    img_right = np.ones((h, w, 3), dtype=np.uint8) * 255
    # Fixed seed so box colors are reproducible across runs.
    random.seed(0)

    draw_left = ImageDraw.Draw(img_left)
    if txts is None or len(txts) != len(boxes):
        txts = [None] * len(boxes)
    for idx, (box, txt) in enumerate(zip(boxes, txts)):
        # Skip low-confidence detections when scores are provided.
        if scores is not None and scores[idx] < drop_score:
            continue
        color = tuple(random.randint(0, 255) for _ in range(3))
        if isinstance(box[0], list):
            box = list(map(tuple, box))
        draw_left.polygon(box, fill=color)
        txt_img = draw_box_txt_fine((w, h), box, txt, font_path)
        outline_pts = np.array(box, np.int32).reshape((-1, 1, 2))
        cv2.polylines(txt_img, [outline_pts], True, color, 1)
        img_right = cv2.bitwise_and(img_right, txt_img)
    img_left = Image.blend(image, img_left, 0.5)
    img_show = Image.new("RGB", (w * 2, h), (255, 255, 255))
    img_show.paste(img_left, (0, 0, w, h))
    img_show.paste(Image.fromarray(img_right), (w, 0, w * 2, h))
    return np.array(img_show)
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def get_rotate_crop_image(img, points):
    """Perspective-crop the quadrilateral *points* (4x2, clockwise from the
    top-left corner) out of *img*; crops that come out clearly taller than
    wide are rotated 90 degrees so the text reads horizontally."""
    assert len(points) == 4, 'shape of points must be 4*2'
    # Target size: the longer of the two opposing edges in each direction.
    crop_w = int(
        max(np.linalg.norm(points[0] - points[1]),
            np.linalg.norm(points[2] - points[3])))
    crop_h = int(
        max(np.linalg.norm(points[0] - points[3]),
            np.linalg.norm(points[1] - points[2])))
    std_quad = np.float32([
        [0, 0],
        [crop_w, 0],
        [crop_w, crop_h],
        [0, crop_h],
    ])
    transform = cv2.getPerspectiveTransform(points, std_quad)
    dst_img = cv2.warpPerspective(
        img,
        transform,
        (crop_w, crop_h),
        borderMode=cv2.BORDER_REPLICATE,
        flags=cv2.INTER_CUBIC,
    )
    dst_h, dst_w = dst_img.shape[0:2]
    # Rotate clearly-vertical crops for downstream horizontal recognition.
    if dst_h * 1.0 / dst_w >= 1.5:
        dst_img = np.rot90(dst_img)
    return dst_img
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def get_minarea_rect_crop(img, points):
    """Crop *img* by the minimum-area rotated rectangle enclosing *points*,
    with corners reordered clockwise starting from the top-left."""
    rect = cv2.minAreaRect(np.array(points).astype(np.int32))
    # Sort the rectangle corners left-to-right by x.
    corners = sorted(cv2.boxPoints(rect), key=lambda p: p[0])

    # Of the two leftmost corners, the upper one (smaller y) is the top-left
    # and the lower one the bottom-left; likewise the two rightmost corners
    # split into top-right / bottom-right.
    if corners[1][1] > corners[0][1]:
        idx_tl, idx_bl = 0, 1
    else:
        idx_tl, idx_bl = 1, 0
    if corners[3][1] > corners[2][1]:
        idx_tr, idx_br = 2, 3
    else:
        idx_tr, idx_br = 3, 2

    ordered = [corners[idx_tl], corners[idx_tr], corners[idx_br], corners[idx_bl]]
    return get_rotate_crop_image(img, np.array(ordered))
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
def check_gpu(use_gpu):
    """Return *use_gpu* unchanged, except force False when CUDA is unavailable."""
    cuda_ok = torch.cuda.is_available()
    return use_gpu if (not use_gpu or cuda_ok) else False
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
# Module is import-only; running it directly is deliberately a no-op.
if __name__ == '__main__':
    pass
|