openocr-python 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openocr/__init__.py +11 -0
- openocr/configs/det/dbnet/repvit_db.yml +173 -0
- openocr/configs/rec/abinet/resnet45_trans_abinet_lang.yml +94 -0
- openocr/configs/rec/abinet/resnet45_trans_abinet_wo_lang.yml +93 -0
- openocr/configs/rec/abinet/svtrv2_abinet_lang.yml +130 -0
- openocr/configs/rec/abinet/svtrv2_abinet_wo_lang.yml +128 -0
- openocr/configs/rec/aster/resnet31_lstm_aster_tps_on.yml +93 -0
- openocr/configs/rec/aster/svtrv2_aster.yml +127 -0
- openocr/configs/rec/aster/svtrv2_aster_tps_on.yml +102 -0
- openocr/configs/rec/autostr/autostr_lstm_aster_tps_on.yml +95 -0
- openocr/configs/rec/busnet/svtrv2_busnet.yml +135 -0
- openocr/configs/rec/busnet/svtrv2_busnet_pretraining.yml +134 -0
- openocr/configs/rec/busnet/vit_busnet.yml +104 -0
- openocr/configs/rec/busnet/vit_busnet_pretraining.yml +104 -0
- openocr/configs/rec/cam/convnextv2_cam_tps_on.yml +118 -0
- openocr/configs/rec/cam/convnextv2_tiny_cam_tps_on.yml +118 -0
- openocr/configs/rec/cam/svtrv2_cam_tps_on.yml +123 -0
- openocr/configs/rec/cdistnet/resnet45_trans_cdistnet.yml +93 -0
- openocr/configs/rec/cdistnet/svtrv2_cdistnet.yml +139 -0
- openocr/configs/rec/cppd/svtr_base_cppd.yml +123 -0
- openocr/configs/rec/cppd/svtr_base_cppd_ch.yml +126 -0
- openocr/configs/rec/cppd/svtr_base_cppd_h8.yml +123 -0
- openocr/configs/rec/cppd/svtr_base_cppd_syn.yml +124 -0
- openocr/configs/rec/cppd/svtrv2_cppd.yml +150 -0
- openocr/configs/rec/dan/resnet45_fpn_dan.yml +98 -0
- openocr/configs/rec/dan/svtrv2_dan.yml +130 -0
- openocr/configs/rec/focalsvtr/focalsvtr_ctc.yml +137 -0
- openocr/configs/rec/gtc/svtrv2_lnconv_nrtr_gtc.yml +168 -0
- openocr/configs/rec/gtc/svtrv2_lnconv_smtr_gtc_long_infer.yml +151 -0
- openocr/configs/rec/gtc/svtrv2_lnconv_smtr_gtc_smtr_long.yml +150 -0
- openocr/configs/rec/gtc/svtrv2_lnconv_smtr_gtc_stream.yml +152 -0
- openocr/configs/rec/igtr/svtr_base_ds_igtr.yml +157 -0
- openocr/configs/rec/lister/focalsvtr_lister_wo_fem_maxratio12.yml +133 -0
- openocr/configs/rec/lister/svtrv2_lister_wo_fem_maxratio12.yml +138 -0
- openocr/configs/rec/lpv/svtr_base_lpv.yml +124 -0
- openocr/configs/rec/lpv/svtr_base_lpv_wo_glrm.yml +123 -0
- openocr/configs/rec/lpv/svtrv2_lpv.yml +147 -0
- openocr/configs/rec/lpv/svtrv2_lpv_wo_glrm.yml +146 -0
- openocr/configs/rec/maerec/vit_nrtr.yml +116 -0
- openocr/configs/rec/matrn/resnet45_trans_matrn.yml +95 -0
- openocr/configs/rec/matrn/svtrv2_matrn.yml +130 -0
- openocr/configs/rec/mgpstr/svtrv2_mgpstr_only_char.yml +140 -0
- openocr/configs/rec/mgpstr/vit_base_mgpstr_only_char.yml +111 -0
- openocr/configs/rec/mgpstr/vit_large_mgpstr_only_char.yml +110 -0
- openocr/configs/rec/mgpstr/vit_mgpstr.yml +110 -0
- openocr/configs/rec/mgpstr/vit_mgpstr_only_char.yml +110 -0
- openocr/configs/rec/moran/resnet31_lstm_moran.yml +92 -0
- openocr/configs/rec/nrtr/focalsvtr_nrtr_maxraio12.yml +145 -0
- openocr/configs/rec/nrtr/nrtr.yml +107 -0
- openocr/configs/rec/nrtr/svtr_base_nrtr.yml +118 -0
- openocr/configs/rec/nrtr/svtr_base_nrtr_syn.yml +119 -0
- openocr/configs/rec/nrtr/svtrv2_nrtr.yml +146 -0
- openocr/configs/rec/ote/svtr_base_h8_ote.yml +117 -0
- openocr/configs/rec/ote/svtr_base_ote.yml +116 -0
- openocr/configs/rec/parseq/focalsvtr_parseq_maxratio12.yml +140 -0
- openocr/configs/rec/parseq/svrtv2_parseq.yml +136 -0
- openocr/configs/rec/parseq/vit_parseq.yml +100 -0
- openocr/configs/rec/robustscanner/resnet31_robustscanner.yml +102 -0
- openocr/configs/rec/robustscanner/svtrv2_robustscanner.yml +134 -0
- openocr/configs/rec/sar/resnet31_lstm_sar.yml +94 -0
- openocr/configs/rec/sar/svtrv2_sar.yml +128 -0
- openocr/configs/rec/seed/resnet31_lstm_seed_tps_on.yml +96 -0
- openocr/configs/rec/smtr/focalsvtr_smtr.yml +150 -0
- openocr/configs/rec/smtr/focalsvtr_smtr_long.yml +133 -0
- openocr/configs/rec/smtr/svtrv2_smtr.yml +150 -0
- openocr/configs/rec/smtr/svtrv2_smtr_bi.yml +136 -0
- openocr/configs/rec/srn/resnet50_fpn_srn.yml +97 -0
- openocr/configs/rec/srn/svtrv2_srn.yml +131 -0
- openocr/configs/rec/svtrs/convnextv2_ctc.yml +105 -0
- openocr/configs/rec/svtrs/convnextv2_h8_ctc.yml +105 -0
- openocr/configs/rec/svtrs/convnextv2_h8_rctc.yml +106 -0
- openocr/configs/rec/svtrs/convnextv2_rctc.yml +106 -0
- openocr/configs/rec/svtrs/convnextv2_tiny_h8_ctc.yml +105 -0
- openocr/configs/rec/svtrs/convnextv2_tiny_h8_rctc.yml +106 -0
- openocr/configs/rec/svtrs/crnn_ctc.yml +99 -0
- openocr/configs/rec/svtrs/crnn_ctc_long.yml +116 -0
- openocr/configs/rec/svtrs/focalnet_base_ctc.yml +108 -0
- openocr/configs/rec/svtrs/focalnet_base_rctc.yml +109 -0
- openocr/configs/rec/svtrs/focalsvtr_ctc.yml +106 -0
- openocr/configs/rec/svtrs/focalsvtr_rctc.yml +107 -0
- openocr/configs/rec/svtrs/resnet45_trans_ctc.yml +103 -0
- openocr/configs/rec/svtrs/resnet45_trans_rctc.yml +104 -0
- openocr/configs/rec/svtrs/svtr_base_ctc.yml +110 -0
- openocr/configs/rec/svtrs/svtr_base_rctc.yml +111 -0
- openocr/configs/rec/svtrs/svtrnet_ctc_syn.yml +111 -0
- openocr/configs/rec/svtrs/vit_ctc.yml +103 -0
- openocr/configs/rec/svtrs/vit_rctc.yml +103 -0
- openocr/configs/rec/svtrv2/repsvtr_ch.yml +121 -0
- openocr/configs/rec/svtrv2/svtrv2_ch.yml +133 -0
- openocr/configs/rec/svtrv2/svtrv2_ctc.yml +136 -0
- openocr/configs/rec/svtrv2/svtrv2_rctc.yml +135 -0
- openocr/configs/rec/svtrv2/svtrv2_small_rctc.yml +135 -0
- openocr/configs/rec/svtrv2/svtrv2_smtr_gtc_rctc.yml +162 -0
- openocr/configs/rec/svtrv2/svtrv2_smtr_gtc_rctc_ch.yml +153 -0
- openocr/configs/rec/svtrv2/svtrv2_tiny_rctc.yml +135 -0
- openocr/configs/rec/visionlan/resnet45_trans_visionlan_LA.yml +103 -0
- openocr/configs/rec/visionlan/resnet45_trans_visionlan_LF_1.yml +102 -0
- openocr/configs/rec/visionlan/resnet45_trans_visionlan_LF_2.yml +103 -0
- openocr/configs/rec/visionlan/svtrv2_visionlan_LA.yml +112 -0
- openocr/configs/rec/visionlan/svtrv2_visionlan_LF_1.yml +111 -0
- openocr/configs/rec/visionlan/svtrv2_visionlan_LF_2.yml +112 -0
- openocr/demo_gradio.py +128 -0
- openocr/opendet/modeling/__init__.py +11 -0
- openocr/opendet/modeling/backbones/__init__.py +14 -0
- openocr/opendet/modeling/backbones/repvit.py +340 -0
- openocr/opendet/modeling/base_detector.py +69 -0
- openocr/opendet/modeling/heads/__init__.py +14 -0
- openocr/opendet/modeling/heads/db_head.py +73 -0
- openocr/opendet/modeling/necks/__init__.py +14 -0
- openocr/opendet/modeling/necks/db_fpn.py +609 -0
- openocr/opendet/postprocess/__init__.py +18 -0
- openocr/opendet/postprocess/db_postprocess.py +273 -0
- openocr/opendet/preprocess/__init__.py +154 -0
- openocr/opendet/preprocess/crop_resize.py +121 -0
- openocr/opendet/preprocess/db_resize_for_test.py +135 -0
- openocr/openrec/losses/__init__.py +62 -0
- openocr/openrec/losses/abinet_loss.py +42 -0
- openocr/openrec/losses/ar_loss.py +23 -0
- openocr/openrec/losses/cam_loss.py +48 -0
- openocr/openrec/losses/cdistnet_loss.py +34 -0
- openocr/openrec/losses/ce_loss.py +68 -0
- openocr/openrec/losses/cppd_loss.py +77 -0
- openocr/openrec/losses/ctc_loss.py +33 -0
- openocr/openrec/losses/igtr_loss.py +12 -0
- openocr/openrec/losses/lister_loss.py +14 -0
- openocr/openrec/losses/lpv_loss.py +30 -0
- openocr/openrec/losses/mgp_loss.py +34 -0
- openocr/openrec/losses/parseq_loss.py +12 -0
- openocr/openrec/losses/robustscanner_loss.py +20 -0
- openocr/openrec/losses/seed_loss.py +46 -0
- openocr/openrec/losses/smtr_loss.py +12 -0
- openocr/openrec/losses/srn_loss.py +40 -0
- openocr/openrec/losses/visionlan_loss.py +58 -0
- openocr/openrec/metrics/__init__.py +19 -0
- openocr/openrec/metrics/rec_metric.py +270 -0
- openocr/openrec/metrics/rec_metric_gtc.py +58 -0
- openocr/openrec/metrics/rec_metric_long.py +142 -0
- openocr/openrec/metrics/rec_metric_mgp.py +93 -0
- openocr/openrec/modeling/__init__.py +11 -0
- openocr/openrec/modeling/base_recognizer.py +69 -0
- openocr/openrec/modeling/common.py +238 -0
- openocr/openrec/modeling/decoders/__init__.py +109 -0
- openocr/openrec/modeling/decoders/abinet_decoder.py +283 -0
- openocr/openrec/modeling/decoders/aster_decoder.py +170 -0
- openocr/openrec/modeling/decoders/bus_decoder.py +133 -0
- openocr/openrec/modeling/decoders/cam_decoder.py +43 -0
- openocr/openrec/modeling/decoders/cdistnet_decoder.py +334 -0
- openocr/openrec/modeling/decoders/cppd_decoder.py +393 -0
- openocr/openrec/modeling/decoders/ctc_decoder.py +203 -0
- openocr/openrec/modeling/decoders/dan_decoder.py +203 -0
- openocr/openrec/modeling/decoders/igtr_decoder.py +815 -0
- openocr/openrec/modeling/decoders/lister_decoder.py +535 -0
- openocr/openrec/modeling/decoders/lpv_decoder.py +119 -0
- openocr/openrec/modeling/decoders/matrn_decoder.py +236 -0
- openocr/openrec/modeling/decoders/mgp_decoder.py +99 -0
- openocr/openrec/modeling/decoders/nrtr_decoder.py +439 -0
- openocr/openrec/modeling/decoders/ote_decoder.py +205 -0
- openocr/openrec/modeling/decoders/parseq_decoder.py +504 -0
- openocr/openrec/modeling/decoders/rctc_decoder.py +70 -0
- openocr/openrec/modeling/decoders/robustscanner_decoder.py +749 -0
- openocr/openrec/modeling/decoders/sar_decoder.py +236 -0
- openocr/openrec/modeling/decoders/smtr_decoder.py +621 -0
- openocr/openrec/modeling/decoders/smtr_decoder_nattn.py +521 -0
- openocr/openrec/modeling/decoders/srn_decoder.py +283 -0
- openocr/openrec/modeling/decoders/visionlan_decoder.py +321 -0
- openocr/openrec/modeling/encoders/__init__.py +39 -0
- openocr/openrec/modeling/encoders/autostr_encoder.py +327 -0
- openocr/openrec/modeling/encoders/cam_encoder.py +760 -0
- openocr/openrec/modeling/encoders/convnextv2.py +213 -0
- openocr/openrec/modeling/encoders/focalsvtr.py +631 -0
- openocr/openrec/modeling/encoders/nrtr_encoder.py +28 -0
- openocr/openrec/modeling/encoders/rec_hgnet.py +346 -0
- openocr/openrec/modeling/encoders/rec_lcnetv3.py +488 -0
- openocr/openrec/modeling/encoders/rec_mobilenet_v3.py +132 -0
- openocr/openrec/modeling/encoders/rec_mv1_enhance.py +254 -0
- openocr/openrec/modeling/encoders/rec_nrtr_mtb.py +37 -0
- openocr/openrec/modeling/encoders/rec_resnet_31.py +213 -0
- openocr/openrec/modeling/encoders/rec_resnet_45.py +183 -0
- openocr/openrec/modeling/encoders/rec_resnet_fpn.py +216 -0
- openocr/openrec/modeling/encoders/rec_resnet_vd.py +252 -0
- openocr/openrec/modeling/encoders/repvit.py +338 -0
- openocr/openrec/modeling/encoders/resnet31_rnn.py +123 -0
- openocr/openrec/modeling/encoders/svtrnet.py +574 -0
- openocr/openrec/modeling/encoders/svtrnet2dpos.py +616 -0
- openocr/openrec/modeling/encoders/svtrv2.py +470 -0
- openocr/openrec/modeling/encoders/svtrv2_lnconv.py +503 -0
- openocr/openrec/modeling/encoders/svtrv2_lnconv_two33.py +517 -0
- openocr/openrec/modeling/encoders/vit.py +120 -0
- openocr/openrec/modeling/transforms/__init__.py +15 -0
- openocr/openrec/modeling/transforms/aster_tps.py +262 -0
- openocr/openrec/modeling/transforms/moran.py +136 -0
- openocr/openrec/modeling/transforms/tps.py +246 -0
- openocr/openrec/optimizer/__init__.py +73 -0
- openocr/openrec/optimizer/lr.py +227 -0
- openocr/openrec/postprocess/__init__.py +72 -0
- openocr/openrec/postprocess/abinet_postprocess.py +37 -0
- openocr/openrec/postprocess/ar_postprocess.py +63 -0
- openocr/openrec/postprocess/ce_postprocess.py +43 -0
- openocr/openrec/postprocess/char_postprocess.py +108 -0
- openocr/openrec/postprocess/cppd_postprocess.py +42 -0
- openocr/openrec/postprocess/ctc_postprocess.py +119 -0
- openocr/openrec/postprocess/igtr_postprocess.py +100 -0
- openocr/openrec/postprocess/lister_postprocess.py +59 -0
- openocr/openrec/postprocess/mgp_postprocess.py +143 -0
- openocr/openrec/postprocess/nrtr_postprocess.py +75 -0
- openocr/openrec/postprocess/smtr_postprocess.py +73 -0
- openocr/openrec/postprocess/srn_postprocess.py +80 -0
- openocr/openrec/postprocess/visionlan_postprocess.py +81 -0
- openocr/openrec/preprocess/__init__.py +173 -0
- openocr/openrec/preprocess/abinet_aug.py +473 -0
- openocr/openrec/preprocess/abinet_label_encode.py +36 -0
- openocr/openrec/preprocess/ar_label_encode.py +36 -0
- openocr/openrec/preprocess/auto_augment.py +1012 -0
- openocr/openrec/preprocess/cam_label_encode.py +141 -0
- openocr/openrec/preprocess/ce_label_encode.py +116 -0
- openocr/openrec/preprocess/char_label_encode.py +36 -0
- openocr/openrec/preprocess/cppd_label_encode.py +173 -0
- openocr/openrec/preprocess/ctc_label_encode.py +124 -0
- openocr/openrec/preprocess/ep_label_encode.py +38 -0
- openocr/openrec/preprocess/igtr_label_encode.py +360 -0
- openocr/openrec/preprocess/mgp_label_encode.py +95 -0
- openocr/openrec/preprocess/parseq_aug.py +150 -0
- openocr/openrec/preprocess/rec_aug.py +211 -0
- openocr/openrec/preprocess/resize.py +534 -0
- openocr/openrec/preprocess/smtr_label_encode.py +125 -0
- openocr/openrec/preprocess/srn_label_encode.py +37 -0
- openocr/openrec/preprocess/visionlan_label_encode.py +67 -0
- openocr/tools/create_lmdb_dataset.py +118 -0
- openocr/tools/data/__init__.py +94 -0
- openocr/tools/data/collate_fn.py +100 -0
- openocr/tools/data/lmdb_dataset.py +142 -0
- openocr/tools/data/lmdb_dataset_test.py +166 -0
- openocr/tools/data/multi_scale_sampler.py +177 -0
- openocr/tools/data/ratio_dataset.py +217 -0
- openocr/tools/data/ratio_dataset_test.py +273 -0
- openocr/tools/data/ratio_dataset_tvresize.py +213 -0
- openocr/tools/data/ratio_dataset_tvresize_test.py +276 -0
- openocr/tools/data/ratio_sampler.py +190 -0
- openocr/tools/data/simple_dataset.py +263 -0
- openocr/tools/data/strlmdb_dataset.py +143 -0
- openocr/tools/engine/__init__.py +5 -0
- openocr/tools/engine/config.py +158 -0
- openocr/tools/engine/trainer.py +621 -0
- openocr/tools/eval_rec.py +41 -0
- openocr/tools/eval_rec_all_ch.py +184 -0
- openocr/tools/eval_rec_all_en.py +206 -0
- openocr/tools/eval_rec_all_long.py +119 -0
- openocr/tools/eval_rec_all_long_simple.py +122 -0
- openocr/tools/export_rec.py +118 -0
- openocr/tools/infer/onnx_engine.py +65 -0
- openocr/tools/infer/predict_rec.py +140 -0
- openocr/tools/infer/utility.py +234 -0
- openocr/tools/infer_det.py +449 -0
- openocr/tools/infer_e2e.py +462 -0
- openocr/tools/infer_e2e_parallel.py +184 -0
- openocr/tools/infer_rec.py +371 -0
- openocr/tools/train_rec.py +37 -0
- openocr/tools/utility.py +45 -0
- openocr/tools/utils/EN_symbol_dict.txt +94 -0
- openocr/tools/utils/__init__.py +0 -0
- openocr/tools/utils/ckpt.py +87 -0
- openocr/tools/utils/dict/ar_dict.txt +117 -0
- openocr/tools/utils/dict/arabic_dict.txt +161 -0
- openocr/tools/utils/dict/be_dict.txt +145 -0
- openocr/tools/utils/dict/bg_dict.txt +140 -0
- openocr/tools/utils/dict/chinese_cht_dict.txt +8421 -0
- openocr/tools/utils/dict/cyrillic_dict.txt +163 -0
- openocr/tools/utils/dict/devanagari_dict.txt +167 -0
- openocr/tools/utils/dict/en_dict.txt +63 -0
- openocr/tools/utils/dict/fa_dict.txt +136 -0
- openocr/tools/utils/dict/french_dict.txt +136 -0
- openocr/tools/utils/dict/german_dict.txt +143 -0
- openocr/tools/utils/dict/hi_dict.txt +162 -0
- openocr/tools/utils/dict/it_dict.txt +118 -0
- openocr/tools/utils/dict/japan_dict.txt +4399 -0
- openocr/tools/utils/dict/ka_dict.txt +153 -0
- openocr/tools/utils/dict/kie_dict/xfund_class_list.txt +4 -0
- openocr/tools/utils/dict/korean_dict.txt +3688 -0
- openocr/tools/utils/dict/latex_symbol_dict.txt +111 -0
- openocr/tools/utils/dict/latin_dict.txt +185 -0
- openocr/tools/utils/dict/layout_dict/layout_cdla_dict.txt +10 -0
- openocr/tools/utils/dict/layout_dict/layout_publaynet_dict.txt +5 -0
- openocr/tools/utils/dict/layout_dict/layout_table_dict.txt +1 -0
- openocr/tools/utils/dict/mr_dict.txt +153 -0
- openocr/tools/utils/dict/ne_dict.txt +153 -0
- openocr/tools/utils/dict/oc_dict.txt +96 -0
- openocr/tools/utils/dict/pu_dict.txt +130 -0
- openocr/tools/utils/dict/rs_dict.txt +91 -0
- openocr/tools/utils/dict/rsc_dict.txt +134 -0
- openocr/tools/utils/dict/ru_dict.txt +125 -0
- openocr/tools/utils/dict/spin_dict.txt +68 -0
- openocr/tools/utils/dict/ta_dict.txt +128 -0
- openocr/tools/utils/dict/table_dict.txt +277 -0
- openocr/tools/utils/dict/table_master_structure_dict.txt +39 -0
- openocr/tools/utils/dict/table_structure_dict.txt +28 -0
- openocr/tools/utils/dict/table_structure_dict_ch.txt +48 -0
- openocr/tools/utils/dict/te_dict.txt +151 -0
- openocr/tools/utils/dict/ug_dict.txt +114 -0
- openocr/tools/utils/dict/uk_dict.txt +142 -0
- openocr/tools/utils/dict/ur_dict.txt +137 -0
- openocr/tools/utils/dict/xi_dict.txt +110 -0
- openocr/tools/utils/dict90.txt +90 -0
- openocr/tools/utils/e2e_metric/Deteval.py +802 -0
- openocr/tools/utils/e2e_metric/polygon_fast.py +70 -0
- openocr/tools/utils/e2e_utils/extract_batchsize.py +86 -0
- openocr/tools/utils/e2e_utils/extract_textpoint_fast.py +479 -0
- openocr/tools/utils/e2e_utils/extract_textpoint_slow.py +582 -0
- openocr/tools/utils/e2e_utils/pgnet_pp_utils.py +159 -0
- openocr/tools/utils/e2e_utils/visual.py +152 -0
- openocr/tools/utils/en_dict.txt +95 -0
- openocr/tools/utils/gen_label.py +68 -0
- openocr/tools/utils/ic15_dict.txt +36 -0
- openocr/tools/utils/logging.py +56 -0
- openocr/tools/utils/poly_nms.py +132 -0
- openocr/tools/utils/ppocr_keys_v1.txt +6623 -0
- openocr/tools/utils/stats.py +58 -0
- openocr/tools/utils/utility.py +165 -0
- openocr/tools/utils/visual.py +117 -0
- openocr_python-0.0.2.dist-info/LICENCE +201 -0
- openocr_python-0.0.2.dist-info/METADATA +98 -0
- openocr_python-0.0.2.dist-info/RECORD +323 -0
- openocr_python-0.0.2.dist-info/WHEEL +5 -0
- openocr_python-0.0.2.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
import copy
|
|
2
|
+
|
|
3
|
+
from torch import nn
|
|
4
|
+
|
|
5
|
+
from .abinet_loss import ABINetLoss
|
|
6
|
+
from .ar_loss import ARLoss
|
|
7
|
+
from .cdistnet_loss import CDistNetLoss
|
|
8
|
+
from .ce_loss import CELoss
|
|
9
|
+
from .cppd_loss import CPPDLoss
|
|
10
|
+
from .ctc_loss import CTCLoss
|
|
11
|
+
from .igtr_loss import IGTRLoss
|
|
12
|
+
from .lister_loss import LISTERLoss
|
|
13
|
+
from .lpv_loss import LPVLoss
|
|
14
|
+
from .mgp_loss import MGPLoss
|
|
15
|
+
from .parseq_loss import PARSeqLoss
|
|
16
|
+
from .robustscanner_loss import RobustScannerLoss
|
|
17
|
+
from .smtr_loss import SMTRLoss
|
|
18
|
+
from .srn_loss import SRNLoss
|
|
19
|
+
from .visionlan_loss import VisionLANLoss
|
|
20
|
+
from .cam_loss import CAMLoss
|
|
21
|
+
from .seed_loss import SEEDLoss
|
|
22
|
+
|
|
23
|
+
# Registry of loss class names that build_loss() may instantiate. Every
# entry must resolve to a class defined/imported in this module.
support_dict = [
    'CTCLoss', 'ARLoss', 'CELoss', 'CPPDLoss', 'ABINetLoss', 'CDistNetLoss',
    'VisionLANLoss', 'PARSeqLoss', 'IGTRLoss', 'SMTRLoss', 'LPVLoss',
    'RobustScannerLoss', 'SRNLoss', 'LISTERLoss', 'GTCLoss', 'MGPLoss',
    'CAMLoss', 'SEEDLoss'
]


def build_loss(config):
    """Instantiate a loss module from a config dict.

    Args:
        config: dict with a 'name' key naming a class in ``support_dict``;
            all remaining keys are forwarded to the class constructor.

    Returns:
        The constructed loss module instance.

    Raises:
        Exception: if ``config['name']`` is not in ``support_dict``.
    """
    config = copy.deepcopy(config)  # do not mutate the caller's config
    module_name = config.pop('name')
    # Explicit raise instead of ``assert`` so the validation survives
    # ``python -O`` (asserts are stripped under optimization).
    if module_name not in support_dict:
        raise Exception('loss only support {}'.format(support_dict))
    # Resolve the class by name from this module's namespace; equivalent
    # to the previous eval() for valid names but avoids eval entirely.
    module_class = globals()[module_name](**config)
    return module_class
|
|
39
|
+
|
|
40
|
+
class GTCLoss(nn.Module):
    """Combined loss for GTC training: a CTC branch plus a guidance branch.

    The guidance ('gtc') loss is built from a sub-config via build_loss();
    the two branch losses are blended with configurable weights.
    """

    def __init__(self,
                 gtc_loss,
                 gtc_weight=1.0,
                 ctc_weight=1.0,
                 zero_infinity=True,
                 **kwargs):
        super(GTCLoss, self).__init__()
        # CTC branch; the guidance branch is configured by ``gtc_loss``.
        self.ctc_loss = CTCLoss(zero_infinity=zero_infinity)
        self.gtc_loss = build_loss(gtc_loss)
        self.gtc_weight = gtc_weight
        self.ctc_weight = ctc_weight

    def forward(self, predicts, batch):
        # The last two batch entries are the CTC label/length pair;
        # everything before them belongs to the guidance branch.
        ctc_batch = [None] + batch[-2:]
        gtc_batch = batch[:-2]
        loss_ctc = self.ctc_loss(predicts['ctc_pred'], ctc_batch)['loss']
        loss_gtc = self.gtc_loss(predicts['gtc_pred'], gtc_batch)['loss']
        total = self.ctc_weight * loss_ctc + self.gtc_weight * loss_gtc
        return {'loss': total, 'ctc_loss': loss_ctc, 'gtc_loss': loss_gtc}
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
from torch import nn
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class ABINetLoss(nn.Module):
    """Cross-entropy loss over the (possibly multi-branch) ABINet outputs.

    ``pred`` maps branch names to either a logits tensor or a list of
    logits tensors (e.g. iterative refinements). The 'align' branch can
    be re-weighted via ``align_weight``.
    """

    def __init__(self,
                 smoothing=False,
                 ignore_index=100,
                 align_weight=1.0,
                 **kwargs):
        super(ABINetLoss, self).__init__()
        # A negative ignore_index disables target masking entirely.
        if ignore_index >= 0:
            self.loss_func = nn.CrossEntropyLoss(reduction='mean',
                                                 ignore_index=ignore_index)
        else:
            self.loss_func = nn.CrossEntropyLoss(reduction='mean')
        self.smoothing = smoothing
        self.align_weight = align_weight

    def forward(self, pred, batch):
        loss = {}
        partial_losses = []
        for name, logits in pred.items():
            if isinstance(logits, list):
                if not logits:
                    # Branch produced nothing this step; skip it.
                    continue
                # Stack every iteration's logits; tile targets to match.
                stacked = torch.cat(logits, 0)
                tiled_tgt = torch.cat([batch[1]] * len(logits), 0)
                flat_logits = stacked.reshape([-1, stacked.shape[2]])
                flat_targets = tiled_tgt.reshape([-1])
            else:
                flat_logits = logits.reshape([-1, logits.shape[2]])
                flat_targets = batch[1].reshape([-1])

            scale = self.align_weight if name == 'align' else 1.0
            loss[name + '_loss'] = self.loss_func(flat_logits,
                                                  flat_targets) * scale
            partial_losses.append(loss[name + '_loss'])
        loss['loss'] = sum(partial_losses)
        return loss
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import torch.nn.functional as F
|
|
2
|
+
from torch import nn
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class ARLoss(nn.Module):
    """Label-smoothed cross-entropy for autoregressive decoders.

    Targets are taken from batch[1] after the leading BOS token, trimmed
    to the longest label length in the batch (plus the EOS position).
    """

    def __init__(self, label_smoothing=0.1, ignore_index=0, **kwargs):
        super(ARLoss, self).__init__()
        self.label_smoothing = label_smoothing

    def forward(self, pred, batch):
        # Trim targets to the longest sequence actually present.
        longest = batch[2].max()
        targets = batch[1][:, 1:2 + longest]
        flat_pred = pred.flatten(0, 1)
        flat_tgt = targets.reshape([-1])
        # NOTE(review): the ignore_index passed below lies outside the
        # class range, so effectively no target is ignored — confirm this
        # is intended.
        loss = F.cross_entropy(
            flat_pred,
            flat_tgt,
            reduction='mean',
            label_smoothing=self.label_smoothing,
            ignore_index=flat_pred.shape[1] + 1,
        )
        return {'loss': loss}
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn.functional as F
|
|
3
|
+
|
|
4
|
+
from .ar_loss import ARLoss
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def BanlanceMultiClassCrossEntropyLoss(x_o, x_t):
    """Class-balanced multi-class cross-entropy for dense (per-pixel) logits.

    Args:
        x_o: logits of shape [B, num_cls, H, W]; the last class index is
            treated as background.
        x_t: soft target maps, with foreground channels in the last dim
            (values > 0.5 are treated as positives) — presumably
            [B, H, W, num_cls - 1]; TODO confirm against callers.

    Returns:
        Scalar mean loss, with foreground classes up-weighted by the
        per-sample background/foreground pixel ratio.
    """
    B, num_cls, H, W = x_o.shape
    x_o = x_o.reshape(B, num_cls, H * W).permute(0, 2, 1)  # [B, H*W, num_cls]
    # Work on a copy: the original code binarized the caller's tensor in
    # place, silently corrupting the batch's mask for any later user.
    x_t = x_t.clone()
    # generate hard targets from the soft maps
    x_t[x_t > 0.5] = 1
    x_t[x_t <= 0.5] = 0
    fg_x_t = x_t.sum(-1)  # [B, H, W]; 0 where no foreground class fires
    x_t = x_t.argmax(-1)  # [B, H, W]
    x_t[fg_x_t == 0] = num_cls - 1  # background class index
    x_t = x_t.reshape(B, H * W)
    # Per-class weights: background stays at 1; foreground classes get the
    # bg/fg pixel ratio so sparse foreground pixels are not drowned out.
    weight = torch.ones((B, num_cls)).type_as(x_o)
    num_bg = (x_t == (num_cls - 1)).sum(-1)  # [B]
    weight[:, :-1] = (num_bg / (H * W - num_bg + 1e-5)).unsqueeze(-1).expand(
        -1, num_cls - 1)
    logit = F.log_softmax(x_o, dim=-1)  # [B, H*W, num_cls]
    logit = logit * weight.unsqueeze(1)
    loss = -logit.gather(2, x_t.unsqueeze(-1).long())
    return loss.mean()
29
|
+
|
|
30
|
+
class CAMLoss(ARLoss):
    """ARLoss plus a weighted binary segmentation loss for CAM training.

    The last element of ``batch`` is the binary (class-activation) mask;
    the remaining elements are forwarded to the base recognition loss.
    """

    def __init__(self, label_smoothing=0.1, loss_weight_binary=1.5, **kwargs):
        super(CAMLoss, self).__init__(label_smoothing=label_smoothing)
        self.label_smoothing = label_smoothing
        self.loss_weight_binary = loss_weight_binary

    def forward(self, pred, batch):
        mask = batch[-1]
        # Recognition branch consumes everything except the trailing mask.
        rec_loss = super().forward(pred['rec_output'], batch[:-1])['loss']
        loss_binary = self.loss_weight_binary * \
            BanlanceMultiClassCrossEntropyLoss(pred['pred_binary'], mask)

        return {
            'loss': rec_loss + loss_binary,
            'rec_loss': rec_loss,
            'loss_binary': loss_binary,
        }
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn.functional as F
|
|
3
|
+
from torch import nn
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class CDistNetLoss(nn.Module):
    """Cross-entropy loss (optionally label-smoothed) for CDistNet.

    Targets come from batch[1] with the leading BOS token dropped; when
    smoothing is enabled, target index 0 is treated as padding and masked
    out of the mean.
    """

    def __init__(self, smoothing=True, ignore_index=0, **kwargs):
        super(CDistNetLoss, self).__init__()
        # Build the plain CE criterion whenever smoothing is off. The
        # original only did so for ignore_index >= 0, which left
        # self.loss_func undefined and crashed with an AttributeError in
        # forward() for negative ignore_index.
        if not smoothing:
            if ignore_index >= 0:
                self.loss_func = nn.CrossEntropyLoss(
                    reduction='mean', ignore_index=ignore_index)
            else:
                self.loss_func = nn.CrossEntropyLoss(reduction='mean')
        self.smoothing = smoothing

    def forward(self, pred, batch):
        pred = pred['res']
        tgt = batch[1][:, 1:]  # drop BOS
        pred = pred.reshape([-1, pred.shape[2]])
        tgt = tgt.reshape([-1])
        if self.smoothing:
            eps = 0.1
            n_class = pred.shape[1]
            one_hot = F.one_hot(tgt.long(), num_classes=pred.shape[1])
            # Distribute eps mass over the n_class - 1 wrong classes.
            one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
            log_prb = F.log_softmax(pred, dim=1)
            # Mask out padding positions (target index 0).
            # (Removed a leftover torch.set_printoptions(profile='full')
            # debug call that mutated global torch state on every forward.)
            non_pad_mask = torch.not_equal(
                tgt, torch.zeros(tgt.shape, dtype=tgt.dtype,
                                 device=tgt.device))
            loss = -(one_hot * log_prb).sum(dim=1)
            loss = loss.masked_select(non_pad_mask).mean()
        else:
            loss = self.loss_func(pred, tgt)
        return {'loss': loss}
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn.functional as F
|
|
3
|
+
from torch import nn
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class CELoss(nn.Module):
    """Cross-entropy loss shared by several decoders.

    Handles three prediction layouts:
      * dict of logits (ABINet-style multi-branch),
      * full-sequence logits with ``with_all=True`` (ViTSTR-style),
      * autoregressive logits trimmed to the longest label (NRTR-style),
        optionally with label smoothing.
    """

    def __init__(self,
                 smoothing=False,
                 with_all=False,
                 ignore_index=-1,
                 **kwargs):
        super(CELoss, self).__init__()
        # A negative ignore_index disables target masking entirely.
        if ignore_index >= 0:
            self.loss_func = nn.CrossEntropyLoss(reduction='mean',
                                                 ignore_index=ignore_index)
        else:
            self.loss_func = nn.CrossEntropyLoss(reduction='mean')
        self.smoothing = smoothing
        self.with_all = with_all

    def forward(self, pred, batch):
        pred = pred['res']
        if isinstance(pred, dict):  # for ABINet
            loss = {}
            loss_sum = []
            for name, logits in pred.items():
                if isinstance(logits, list):
                    # Concatenate every refinement step and tile targets.
                    logit_num = len(logits)
                    all_tgt = torch.cat([batch[1]] * logit_num, 0)
                    all_logits = torch.cat(logits, 0)
                    flt_logtis = all_logits.reshape([-1, all_logits.shape[2]])
                    flt_tgt = all_tgt.reshape([-1])
                else:
                    flt_logtis = logits.reshape([-1, logits.shape[2]])
                    flt_tgt = batch[1].reshape([-1])
                loss[name + '_loss'] = self.loss_func(flt_logtis, flt_tgt)
                loss_sum.append(loss[name + '_loss'])
            loss['loss'] = sum(loss_sum)
            return loss
        else:
            if self.with_all:  # for ViTSTR
                tgt = batch[1]
                pred = pred.reshape([-1, pred.shape[2]])
                tgt = tgt.reshape([-1])
                loss = self.loss_func(pred, tgt)
                return {'loss': loss}
            else:  # for NRTR
                max_len = batch[2].max()
                tgt = batch[1][:, 1:2 + max_len]  # drop BOS, keep EOS slot
                pred = pred.reshape([-1, pred.shape[2]])
                tgt = tgt.reshape([-1])
                if self.smoothing:
                    eps = 0.1
                    n_class = pred.shape[1]
                    one_hot = F.one_hot(tgt, pred.shape[1])
                    # Distribute eps over the n_class - 1 wrong classes.
                    # (The original divided by -1, producing a negative
                    # off-target probability — a label-smoothing bug; it
                    # also left a dead bare `pred.shape[1]` statement.)
                    one_hot = one_hot * (1 - eps) + \
                        (1 - one_hot) * eps / (n_class - 1)
                    log_prb = F.log_softmax(pred, dim=1)
                    # Positions whose target is 0 (padding) are masked out.
                    non_pad_mask = torch.not_equal(
                        tgt,
                        torch.zeros(tgt.shape,
                                    dtype=tgt.dtype,
                                    device=tgt.device))
                    loss = -(one_hot * log_prb).sum(dim=1)
                    loss = loss.masked_select(non_pad_mask).mean()
                else:
                    loss = self.loss_func(pred, tgt)
                return {'loss': loss}
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn.functional as F
|
|
3
|
+
from torch import nn
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class CPPDLoss(nn.Module):
    """Loss for the CPPD decoder: node (side) losses plus an edge loss.

    ``pred`` is a (node_feats, edge_feats) pair. node_feats[0] predicts
    per-character counts, node_feats[1] predicts position occupancy;
    edge_feats are the character classification logits. The node losses
    are scaled by ``sideloss_weight`` and added to the edge loss.
    """

    def __init__(self,
                 smoothing=False,
                 ignore_index=100,
                 pos_len=False,
                 sideloss_weight=1.0,
                 max_len=25,
                 **kwargs):
        """
        Args:
            smoothing: use label-smoothed CE for the edge loss.
            ignore_index: padding index masked out of the edge/pos losses.
            pos_len: if True, position targets are class indices (CE);
                otherwise they are binary occupancy maps (BCE-with-logits).
            sideloss_weight: multiplier on the combined node loss.
            max_len: max label length; +1 below accounts for the EOS slot.
        """
        super(CPPDLoss, self).__init__()
        self.edge_ce = nn.CrossEntropyLoss(reduction='mean',
                                           ignore_index=ignore_index)
        self.char_node_ce = nn.CrossEntropyLoss(reduction='mean')
        if pos_len:
            self.pos_node_ce = nn.CrossEntropyLoss(reduction='mean',
                                                   ignore_index=ignore_index)
        else:
            self.pos_node_ce = nn.BCEWithLogitsLoss(reduction='mean')

        self.smoothing = smoothing
        self.ignore_index = ignore_index
        self.pos_len = pos_len
        self.sideloss_weight = sideloss_weight
        # +1 reserves the EOS position alongside the max_len characters.
        self.max_len = max_len + 1

    def label_smoothing_ce(self, preds, targets):
        """Label-smoothed CE over ``preds`` [N, C], masking padding targets.

        Targets equal to ``self.ignore_index`` are remapped to 0 for the
        one-hot encoding and then excluded from the mean via the mask.
        """
        zeros_ = torch.zeros_like(targets)
        ignore_index_ = zeros_ + self.ignore_index
        non_pad_mask = torch.not_equal(targets, ignore_index_)

        tgts = torch.where(targets == ignore_index_, zeros_, targets)
        eps = 0.1
        n_class = preds.shape[1]
        # Smear eps probability mass over the n_class - 1 wrong classes.
        one_hot = F.one_hot(tgts, preds.shape[1])
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
        log_prb = F.log_softmax(preds, dim=1)
        loss = -(one_hot * log_prb).sum(dim=1)
        loss = loss.masked_select(non_pad_mask).mean()
        return loss

    def forward(self, pred, batch):
        # node_tgt packs character-count targets followed by the last
        # self.max_len position targets; char_tgt is the edge target.
        node_feats, edge_feats = pred
        node_tgt = batch[2]
        char_tgt = batch[1]

        # --- node (side) losses ---
        # Clip count labels into the valid class range of the count head.
        char_num_label = torch.clip(node_tgt[:, :-self.max_len].flatten(0, 1),
                                    0, node_feats[0].shape[-1] - 1)
        loss_char_node = self.char_node_ce(node_feats[0].flatten(0, 1),
                                           char_num_label)
        if self.pos_len:
            # Position targets as class indices (CE path).
            loss_pos_node = self.pos_node_ce(
                node_feats[1].flatten(0, 1),
                node_tgt[:, -self.max_len:].flatten(0, 1))
        else:
            # Binary occupancy targets; BCEWithLogits needs float targets.
            loss_pos_node = self.pos_node_ce(
                node_feats[1].flatten(0, 1),
                node_tgt[:, -self.max_len:].flatten(0, 1).float())
        loss_node = loss_char_node + loss_pos_node
        # --- edge (character classification) loss ---
        edge_feats = edge_feats.flatten(0, 1)
        char_tgt = char_tgt.flatten(0, 1)
        if self.smoothing:
            loss_edge = self.label_smoothing_ce(edge_feats, char_tgt)
        else:
            loss_edge = self.edge_ce(edge_feats, char_tgt)

        return {
            'loss': self.sideloss_weight * loss_node + loss_edge,
            'loss_node': self.sideloss_weight * loss_node,
            'loss_edge': loss_edge,
        }
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
from torch import nn
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class CTCLoss(nn.Module):
    """CTC loss with an optional focal-style reweighting of hard samples."""

    def __init__(self, use_focal_loss=False, zero_infinity=False, **kwargs):
        super(CTCLoss, self).__init__()
        # reduction='none' keeps per-sample losses so focal weights can be
        # applied before averaging.
        self.loss_func = nn.CTCLoss(blank=0,
                                    reduction='none',
                                    zero_infinity=zero_infinity)
        self.use_focal_loss = use_focal_loss

    def forward(self, predicts, batch):
        # predicts = predicts['res']

        num_samples = predicts.size(0)
        label, label_length = batch[1], batch[2]
        # CTC expects time-major log-probabilities: [T, B, C].
        log_probs = predicts.log_softmax(2).permute(1, 0, 2)
        input_lengths = torch.tensor([log_probs.size(0)] * num_samples,
                                     dtype=torch.long)
        loss = self.loss_func(log_probs, label, input_lengths, label_length)

        if self.use_focal_loss:
            # Clamp before exp() so extreme losses cannot overflow.
            clamped = torch.clamp(loss, min=-20, max=20)
            weight = torch.square(1 - torch.exp(-clamped))
            # Keep the raw loss wherever the weight would zero it out.
            loss = torch.where(weight > 0, loss * weight, loss)
        return {'loss': loss.mean()}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from torch import nn
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class LISTERLoss(nn.Module):
    """Pass-through loss for LISTER: the model already computes its loss,
    so this module simply extracts and returns it."""

    def __init__(self, **kwargs):
        super(LISTERLoss, self).__init__()

    def forward(self, predicts, batch):
        """Return the precomputed loss (first element when given a list)."""
        return predicts[0] if isinstance(predicts, list) else predicts
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import torch.nn.functional as F
|
|
2
|
+
from torch import nn
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class LPVLoss(nn.Module):
    """Multi-stage cross-entropy loss: one CE term per prediction stage,
    with the per-stage terms summed into the total ``'loss'`` entry."""

    def __init__(self, label_smoothing=0.0, **kwargs):
        super(LPVLoss, self).__init__()
        # Smoothing factor forwarded to F.cross_entropy for every stage.
        self.label_smoothing = label_smoothing

    def forward(self, preds, batch):
        """Compute per-stage CE losses.

        Args:
            preds: iterable of per-stage logits, each (B, T, C).
            batch: batch[1] padded labels, batch[2] label lengths.

        Returns:
            dict of ``'loss<i>'`` per stage plus the summed ``'loss'``.
        """
        longest = batch[2].max()
        # Drop the leading token and keep one step past the longest label.
        targets = batch[1][:, 1:2 + longest].flatten(0, 1)
        total = 0
        losses = {}
        for idx, stage_logits in enumerate(preds):
            flat_logits = stage_logits.flatten(0, 1)
            stage_loss = F.cross_entropy(
                flat_logits,
                targets,
                reduction='mean',
                label_smoothing=self.label_smoothing,
                # Index outside the class range: effectively nothing ignored.
                ignore_index=flat_logits.shape[1] + 1,
            )
            total = total + stage_loss
            losses['loss' + str(idx)] = stage_loss
        losses['loss'] = total
        return losses
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
from torch import nn
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class MGPLoss(nn.Module):
    """Multi-granularity prediction loss: cross entropy over up to three
    target granularities (char / dpe / wp targets from batch[1..3]),
    summed into one total."""

    def __init__(self, only_char=False, **kwargs):
        super(MGPLoss, self).__init__()
        # Padding label 0 is ignored at every granularity.
        self.ce = nn.CrossEntropyLoss(reduction='mean', ignore_index=0)
        self.only_char = only_char

    def forward(self, pred, batch):
        """Dispatch to the char-only or the three-branch loss."""
        if not self.only_char:
            return self.forward_all(pred, batch)
        targets = batch[1].flatten(0, 1)
        return {'loss': self.ce(pred.flatten(0, 1), targets)}

    def forward_all(self, pred, batch):
        """Compute and sum the char, dpe and wp granularity losses."""
        char_logits, dpe_logits, wp_logits = pred
        char_loss = self.ce(char_logits.flatten(0, 1), batch[1].flatten(0, 1))
        dpe_loss = self.ce(dpe_logits.flatten(0, 1), batch[2].flatten(0, 1))
        wp_loss = self.ce(wp_logits.flatten(0, 1), batch[3].flatten(0, 1))
        return {
            'loss': char_loss + dpe_loss + wp_loss,
            'char_loss': char_loss,
            'dpe_loss': dpe_loss,
            'wp_loss': wp_loss
        }
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from torch import nn
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class RobustScannerLoss(nn.Module):
    """Sequence cross-entropy for RobustScanner-style decoding: the last
    prediction step is dropped and targets are shifted left by one."""

    def __init__(self, **kwargs):
        super(RobustScannerLoss, self).__init__()
        # Default ignore index 38 — presumably the padding class of the
        # default charset; confirm against the label encoder.
        pad_index = kwargs.get('ignore_index', 38)
        self.loss_func = nn.CrossEntropyLoss(reduction='mean',
                                             ignore_index=pad_index)

    def forward(self, pred, batch):
        """Return ``{'loss': scalar}`` for (B, T, C) logits and batch[1]
        padded labels."""
        trimmed = pred[:, :-1, :]  # drop the final time step
        targets = batch[1][:, 1:].reshape([-1])  # drop the leading token
        logits = trimmed.reshape([-1, trimmed.shape[2]])
        return {'loss': self.loss_func(logits, targets)}
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import torch.nn.functional as F
|
|
2
|
+
from torch import nn
|
|
3
|
+
import torch
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class CosineEmbeddingLoss(nn.Module):
    """Mean of ``1 - cosine_similarity(x1, x2)`` along the last dim.

    ``margin`` is stored for interface compatibility; the forward pass
    does not use it.
    """

    def __init__(self, margin=0.0):
        super(CosineEmbeddingLoss, self).__init__()
        self.margin = margin
        # Guards against division by zero for zero-norm vectors.
        self.epsilon = 1e-12

    def forward(self, x1, x2):
        """Return the scalar mean cosine-distance between x1 and x2."""
        dot = (x1 * x2).sum(dim=-1)
        denom = torch.norm(x1, dim=-1) * torch.norm(x2, dim=-1) + self.epsilon
        return (1 - dot / denom).mean()
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class SEEDLoss(nn.Module):
    """Recognition cross-entropy plus a 0.1-weighted semantic
    (cosine-embedding) auxiliary loss between the predicted embedding and
    batch[3]."""

    def __init__(self, label_smoothing=0.1, ignore_index=0, **kwargs):
        super(SEEDLoss, self).__init__()
        self.label_smoothing = label_smoothing
        # NOTE(review): ignore_index is accepted but unused; the CE below
        # passes an out-of-range index instead.
        self.loss_sem = CosineEmbeddingLoss()

    def forward(self, preds, batch):
        """Return total, recognition and semantic losses as a dict."""
        embedding_vectors, pred = preds
        longest = batch[2].max()
        # Drop the leading token; keep one step past the longest label.
        targets = batch[1][:, 1:2 + longest].reshape([-1])
        flat_pred = pred.flatten(0, 1)
        rec_loss = F.cross_entropy(
            flat_pred,
            targets,
            reduction='mean',
            label_smoothing=self.label_smoothing,
            # Index outside the class range: effectively nothing ignored.
            ignore_index=flat_pred.shape[1] + 1,
        )
        # torch.sum over an already-scalar mean is a no-op kept for parity.
        sem_loss = torch.sum(self.loss_sem(embedding_vectors,
                                           batch[3].float()))
        return {
            'loss': rec_loss + 0.1 * sem_loss,
            'rec_loss': rec_loss,
            'sem_loss': sem_loss
        }
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import torch.nn.functional as F
|
|
2
|
+
from torch import nn
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class SRNLoss(nn.Module):
    """Weighted sum of the three SRN branch losses (pvam / gsrm / vsfd),
    each a cross entropy against the same flattened labels."""

    def __init__(self, label_smoothing=0.0, **kwargs):
        super(SRNLoss, self).__init__()
        self.label_smoothing = label_smoothing

    def forward(self, preds, batch):
        """Return the per-branch losses and the 3.0/0.15/1.0-weighted total."""
        pvam_preds, gsrm_preds, vsfd_preds = preds
        flat_label = batch[1].reshape([-1])
        # Index outside the class range: effectively nothing is ignored.
        skip_index = pvam_preds.shape[-1] + 1

        def branch_ce(logits):
            # Shared CE configuration for every branch.
            return F.cross_entropy(logits,
                                   flat_label,
                                   reduction='mean',
                                   label_smoothing=self.label_smoothing,
                                   ignore_index=skip_index)

        loss_pvam = branch_ce(pvam_preds)
        loss_gsrm = branch_ce(gsrm_preds)
        loss_vsfd = branch_ce(vsfd_preds)

        return {
            'loss_pvam': loss_pvam,
            'loss_gsrm': loss_gsrm,
            'loss_vsfd': loss_vsfd,
            'loss': loss_pvam * 3.0 + loss_gsrm * 0.15 + loss_vsfd,
        }
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn.functional as F
|
|
3
|
+
from torch import nn
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def flatten_label(target):
    """Flatten padded label rows up to (and including) the first 0.

    Args:
        target: 2-D integer tensor of padded labels; every row must
            contain at least one 0 (the stop/pad marker), otherwise
            ``list.index`` raises ValueError.

    Returns:
        Tuple of (LongTensor of concatenated valid labels,
        IntTensor of per-row valid lengths).
    """
    flat, lengths = [], []
    for row in target:
        seq = row.tolist()
        stop = seq.index(0) + 1  # keep the terminating 0 itself
        flat.extend(seq[:stop])
        lengths.append(stop)
    return torch.LongTensor(flat), torch.IntTensor(lengths)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _flatten(sources, lengths):
|
|
19
|
+
return torch.cat([t[:l] for t, l in zip(sources, lengths)])
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class VisionLANLoss(nn.Module):
    """Cross-entropy loss for VisionLAN training stages.

    ``training_step`` selects the schedule stage: 'LF_1' trains only the
    primary text branch; 'LF_2' and 'LA' additionally supervise the
    remaining-text and masked-text branches, weighted by ``ratio_res`` and
    ``ratio_sub``.
    """

    def __init__(self,
                 training_step='LA',
                 ratio_res=0.5,
                 ratio_sub=0.5,
                 **kwargs):
        super(VisionLANLoss, self).__init__()
        self.loss_func = nn.CrossEntropyLoss(reduction='mean')
        self.ratio_res = ratio_res
        self.ratio_sub = ratio_sub
        assert training_step in ['LF_1', 'LF_2', 'LA']
        self.training_step = training_step

    def forward(self, pred, batch):
        """Return ``{'loss': scalar}`` for the stage-dependent branches.

        Args:
            pred: tuple (text_pre, text_rem, text_mas, _) of per-branch
                logits; the fourth element is unused here.
            batch: batch[1..3] are the primary / remaining / masked padded
                label tensors (rows terminated by 0).
        """
        text_pre, text_rem, text_mas, _ = pred
        labels = batch[1].to(dtype=torch.int64)
        flat_labels, valid_lens = flatten_label(labels)
        # Keep only the valid steps (up to and including the first 0).
        pre_logits = _flatten(text_pre, valid_lens)
        if self.training_step == 'LF_1':
            return {
                'loss':
                self.loss_func(pre_logits, flat_labels.to(pre_logits.device))
            }
        rem_labels = batch[2].to(dtype=torch.int64)
        sub_labels = batch[3].to(dtype=torch.int64)
        flat_rem, rem_lens = flatten_label(rem_labels)
        flat_sub, sub_lens = flatten_label(sub_labels)
        rem_logits = _flatten(text_rem, rem_lens)
        mas_logits = _flatten(text_mas, sub_lens)
        loss_ori = self.loss_func(pre_logits,
                                  flat_labels.to(pre_logits.device))
        loss_res = self.loss_func(rem_logits,
                                  flat_rem.to(rem_logits.device))
        loss_mas = self.loss_func(mas_logits,
                                  flat_sub.to(mas_logits.device))
        total = (loss_ori + loss_res * self.ratio_res +
                 loss_mas * self.ratio_sub)
        return {'loss': total}
|