pyxllib 0.3.96__py3-none-any.whl → 0.3.197__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyxllib/algo/geo.py +12 -0
- pyxllib/algo/intervals.py +1 -1
- pyxllib/algo/matcher.py +78 -0
- pyxllib/algo/pupil.py +187 -19
- pyxllib/algo/specialist.py +2 -1
- pyxllib/algo/stat.py +38 -2
- {pyxlpr → pyxllib/autogui}/__init__.py +1 -1
- pyxllib/autogui/activewin.py +246 -0
- pyxllib/autogui/all.py +9 -0
- pyxllib/{ext/autogui → autogui}/autogui.py +40 -11
- pyxllib/autogui/uiautolib.py +362 -0
- pyxllib/autogui/wechat.py +827 -0
- pyxllib/autogui/wechat_msg.py +421 -0
- pyxllib/autogui/wxautolib.py +84 -0
- pyxllib/cv/slidercaptcha.py +137 -0
- pyxllib/data/echarts.py +123 -12
- pyxllib/data/jsonlib.py +89 -0
- pyxllib/data/pglib.py +514 -30
- pyxllib/data/sqlite.py +231 -4
- pyxllib/ext/JLineViewer.py +14 -1
- pyxllib/ext/drissionlib.py +277 -0
- pyxllib/ext/kq5034lib.py +0 -1594
- pyxllib/ext/robustprocfile.py +497 -0
- pyxllib/ext/unixlib.py +6 -5
- pyxllib/ext/utools.py +108 -95
- pyxllib/ext/webhook.py +32 -14
- pyxllib/ext/wjxlib.py +88 -0
- pyxllib/ext/wpsapi.py +124 -0
- pyxllib/ext/xlwork.py +9 -0
- pyxllib/ext/yuquelib.py +1003 -71
- pyxllib/file/docxlib.py +1 -1
- pyxllib/file/libreoffice.py +165 -0
- pyxllib/file/movielib.py +9 -0
- pyxllib/file/packlib/__init__.py +112 -75
- pyxllib/file/pdflib.py +1 -1
- pyxllib/file/pupil.py +1 -1
- pyxllib/file/specialist/dirlib.py +1 -1
- pyxllib/file/specialist/download.py +10 -3
- pyxllib/file/specialist/filelib.py +266 -55
- pyxllib/file/xlsxlib.py +205 -50
- pyxllib/file/xlsyncfile.py +341 -0
- pyxllib/prog/cachetools.py +64 -0
- pyxllib/prog/filelock.py +42 -0
- pyxllib/prog/multiprogs.py +940 -0
- pyxllib/prog/newbie.py +9 -2
- pyxllib/prog/pupil.py +129 -60
- pyxllib/prog/specialist/__init__.py +176 -2
- pyxllib/prog/specialist/bc.py +5 -2
- pyxllib/prog/specialist/browser.py +11 -2
- pyxllib/prog/specialist/datetime.py +68 -0
- pyxllib/prog/specialist/tictoc.py +12 -13
- pyxllib/prog/specialist/xllog.py +5 -5
- pyxllib/prog/xlosenv.py +7 -0
- pyxllib/text/airscript.js +744 -0
- pyxllib/text/charclasslib.py +17 -5
- pyxllib/text/jiebalib.py +6 -3
- pyxllib/text/jinjalib.py +32 -0
- pyxllib/text/jsa_ai_prompt.md +271 -0
- pyxllib/text/jscode.py +159 -4
- pyxllib/text/nestenv.py +1 -1
- pyxllib/text/newbie.py +12 -0
- pyxllib/text/pupil/common.py +26 -0
- pyxllib/text/specialist/ptag.py +2 -2
- pyxllib/text/templates/echart_base.html +11 -0
- pyxllib/text/templates/highlight_code.html +17 -0
- pyxllib/text/templates/latex_editor.html +103 -0
- pyxllib/text/xmllib.py +76 -14
- pyxllib/xl.py +2 -1
- pyxllib-0.3.197.dist-info/METADATA +48 -0
- pyxllib-0.3.197.dist-info/RECORD +126 -0
- {pyxllib-0.3.96.dist-info → pyxllib-0.3.197.dist-info}/WHEEL +1 -2
- pyxllib/ext/autogui/__init__.py +0 -8
- pyxllib-0.3.96.dist-info/METADATA +0 -51
- pyxllib-0.3.96.dist-info/RECORD +0 -333
- pyxllib-0.3.96.dist-info/top_level.txt +0 -2
- pyxlpr/ai/__init__.py +0 -5
- pyxlpr/ai/clientlib.py +0 -1281
- pyxlpr/ai/specialist.py +0 -286
- pyxlpr/ai/torch_app.py +0 -172
- pyxlpr/ai/xlpaddle.py +0 -655
- pyxlpr/ai/xltorch.py +0 -705
- pyxlpr/data/__init__.py +0 -11
- pyxlpr/data/coco.py +0 -1325
- pyxlpr/data/datacls.py +0 -365
- pyxlpr/data/datasets.py +0 -200
- pyxlpr/data/gptlib.py +0 -1291
- pyxlpr/data/icdar/__init__.py +0 -96
- pyxlpr/data/icdar/deteval.py +0 -377
- pyxlpr/data/icdar/icdar2013.py +0 -341
- pyxlpr/data/icdar/iou.py +0 -340
- pyxlpr/data/icdar/rrc_evaluation_funcs_1_1.py +0 -463
- pyxlpr/data/imtextline.py +0 -473
- pyxlpr/data/labelme.py +0 -866
- pyxlpr/data/removeline.py +0 -179
- pyxlpr/data/specialist.py +0 -57
- pyxlpr/eval/__init__.py +0 -85
- pyxlpr/paddleocr.py +0 -776
- pyxlpr/ppocr/__init__.py +0 -15
- pyxlpr/ppocr/configs/rec/multi_language/generate_multi_language_configs.py +0 -226
- pyxlpr/ppocr/data/__init__.py +0 -135
- pyxlpr/ppocr/data/imaug/ColorJitter.py +0 -26
- pyxlpr/ppocr/data/imaug/__init__.py +0 -67
- pyxlpr/ppocr/data/imaug/copy_paste.py +0 -170
- pyxlpr/ppocr/data/imaug/east_process.py +0 -437
- pyxlpr/ppocr/data/imaug/gen_table_mask.py +0 -244
- pyxlpr/ppocr/data/imaug/iaa_augment.py +0 -114
- pyxlpr/ppocr/data/imaug/label_ops.py +0 -789
- pyxlpr/ppocr/data/imaug/make_border_map.py +0 -184
- pyxlpr/ppocr/data/imaug/make_pse_gt.py +0 -106
- pyxlpr/ppocr/data/imaug/make_shrink_map.py +0 -126
- pyxlpr/ppocr/data/imaug/operators.py +0 -433
- pyxlpr/ppocr/data/imaug/pg_process.py +0 -906
- pyxlpr/ppocr/data/imaug/randaugment.py +0 -143
- pyxlpr/ppocr/data/imaug/random_crop_data.py +0 -239
- pyxlpr/ppocr/data/imaug/rec_img_aug.py +0 -533
- pyxlpr/ppocr/data/imaug/sast_process.py +0 -777
- pyxlpr/ppocr/data/imaug/text_image_aug/__init__.py +0 -17
- pyxlpr/ppocr/data/imaug/text_image_aug/augment.py +0 -120
- pyxlpr/ppocr/data/imaug/text_image_aug/warp_mls.py +0 -168
- pyxlpr/ppocr/data/lmdb_dataset.py +0 -115
- pyxlpr/ppocr/data/pgnet_dataset.py +0 -104
- pyxlpr/ppocr/data/pubtab_dataset.py +0 -107
- pyxlpr/ppocr/data/simple_dataset.py +0 -372
- pyxlpr/ppocr/losses/__init__.py +0 -61
- pyxlpr/ppocr/losses/ace_loss.py +0 -52
- pyxlpr/ppocr/losses/basic_loss.py +0 -135
- pyxlpr/ppocr/losses/center_loss.py +0 -88
- pyxlpr/ppocr/losses/cls_loss.py +0 -30
- pyxlpr/ppocr/losses/combined_loss.py +0 -67
- pyxlpr/ppocr/losses/det_basic_loss.py +0 -208
- pyxlpr/ppocr/losses/det_db_loss.py +0 -80
- pyxlpr/ppocr/losses/det_east_loss.py +0 -63
- pyxlpr/ppocr/losses/det_pse_loss.py +0 -149
- pyxlpr/ppocr/losses/det_sast_loss.py +0 -121
- pyxlpr/ppocr/losses/distillation_loss.py +0 -272
- pyxlpr/ppocr/losses/e2e_pg_loss.py +0 -140
- pyxlpr/ppocr/losses/kie_sdmgr_loss.py +0 -113
- pyxlpr/ppocr/losses/rec_aster_loss.py +0 -99
- pyxlpr/ppocr/losses/rec_att_loss.py +0 -39
- pyxlpr/ppocr/losses/rec_ctc_loss.py +0 -44
- pyxlpr/ppocr/losses/rec_enhanced_ctc_loss.py +0 -70
- pyxlpr/ppocr/losses/rec_nrtr_loss.py +0 -30
- pyxlpr/ppocr/losses/rec_sar_loss.py +0 -28
- pyxlpr/ppocr/losses/rec_srn_loss.py +0 -47
- pyxlpr/ppocr/losses/table_att_loss.py +0 -109
- pyxlpr/ppocr/metrics/__init__.py +0 -44
- pyxlpr/ppocr/metrics/cls_metric.py +0 -45
- pyxlpr/ppocr/metrics/det_metric.py +0 -82
- pyxlpr/ppocr/metrics/distillation_metric.py +0 -73
- pyxlpr/ppocr/metrics/e2e_metric.py +0 -86
- pyxlpr/ppocr/metrics/eval_det_iou.py +0 -274
- pyxlpr/ppocr/metrics/kie_metric.py +0 -70
- pyxlpr/ppocr/metrics/rec_metric.py +0 -75
- pyxlpr/ppocr/metrics/table_metric.py +0 -50
- pyxlpr/ppocr/modeling/architectures/__init__.py +0 -32
- pyxlpr/ppocr/modeling/architectures/base_model.py +0 -88
- pyxlpr/ppocr/modeling/architectures/distillation_model.py +0 -60
- pyxlpr/ppocr/modeling/backbones/__init__.py +0 -54
- pyxlpr/ppocr/modeling/backbones/det_mobilenet_v3.py +0 -268
- pyxlpr/ppocr/modeling/backbones/det_resnet_vd.py +0 -246
- pyxlpr/ppocr/modeling/backbones/det_resnet_vd_sast.py +0 -285
- pyxlpr/ppocr/modeling/backbones/e2e_resnet_vd_pg.py +0 -265
- pyxlpr/ppocr/modeling/backbones/kie_unet_sdmgr.py +0 -186
- pyxlpr/ppocr/modeling/backbones/rec_mobilenet_v3.py +0 -138
- pyxlpr/ppocr/modeling/backbones/rec_mv1_enhance.py +0 -258
- pyxlpr/ppocr/modeling/backbones/rec_nrtr_mtb.py +0 -48
- pyxlpr/ppocr/modeling/backbones/rec_resnet_31.py +0 -210
- pyxlpr/ppocr/modeling/backbones/rec_resnet_aster.py +0 -143
- pyxlpr/ppocr/modeling/backbones/rec_resnet_fpn.py +0 -307
- pyxlpr/ppocr/modeling/backbones/rec_resnet_vd.py +0 -286
- pyxlpr/ppocr/modeling/heads/__init__.py +0 -54
- pyxlpr/ppocr/modeling/heads/cls_head.py +0 -52
- pyxlpr/ppocr/modeling/heads/det_db_head.py +0 -118
- pyxlpr/ppocr/modeling/heads/det_east_head.py +0 -121
- pyxlpr/ppocr/modeling/heads/det_pse_head.py +0 -37
- pyxlpr/ppocr/modeling/heads/det_sast_head.py +0 -128
- pyxlpr/ppocr/modeling/heads/e2e_pg_head.py +0 -253
- pyxlpr/ppocr/modeling/heads/kie_sdmgr_head.py +0 -206
- pyxlpr/ppocr/modeling/heads/multiheadAttention.py +0 -163
- pyxlpr/ppocr/modeling/heads/rec_aster_head.py +0 -393
- pyxlpr/ppocr/modeling/heads/rec_att_head.py +0 -202
- pyxlpr/ppocr/modeling/heads/rec_ctc_head.py +0 -88
- pyxlpr/ppocr/modeling/heads/rec_nrtr_head.py +0 -826
- pyxlpr/ppocr/modeling/heads/rec_sar_head.py +0 -402
- pyxlpr/ppocr/modeling/heads/rec_srn_head.py +0 -280
- pyxlpr/ppocr/modeling/heads/self_attention.py +0 -406
- pyxlpr/ppocr/modeling/heads/table_att_head.py +0 -246
- pyxlpr/ppocr/modeling/necks/__init__.py +0 -32
- pyxlpr/ppocr/modeling/necks/db_fpn.py +0 -111
- pyxlpr/ppocr/modeling/necks/east_fpn.py +0 -188
- pyxlpr/ppocr/modeling/necks/fpn.py +0 -138
- pyxlpr/ppocr/modeling/necks/pg_fpn.py +0 -314
- pyxlpr/ppocr/modeling/necks/rnn.py +0 -92
- pyxlpr/ppocr/modeling/necks/sast_fpn.py +0 -284
- pyxlpr/ppocr/modeling/necks/table_fpn.py +0 -110
- pyxlpr/ppocr/modeling/transforms/__init__.py +0 -28
- pyxlpr/ppocr/modeling/transforms/stn.py +0 -135
- pyxlpr/ppocr/modeling/transforms/tps.py +0 -308
- pyxlpr/ppocr/modeling/transforms/tps_spatial_transformer.py +0 -156
- pyxlpr/ppocr/optimizer/__init__.py +0 -61
- pyxlpr/ppocr/optimizer/learning_rate.py +0 -228
- pyxlpr/ppocr/optimizer/lr_scheduler.py +0 -49
- pyxlpr/ppocr/optimizer/optimizer.py +0 -160
- pyxlpr/ppocr/optimizer/regularizer.py +0 -52
- pyxlpr/ppocr/postprocess/__init__.py +0 -55
- pyxlpr/ppocr/postprocess/cls_postprocess.py +0 -33
- pyxlpr/ppocr/postprocess/db_postprocess.py +0 -234
- pyxlpr/ppocr/postprocess/east_postprocess.py +0 -143
- pyxlpr/ppocr/postprocess/locality_aware_nms.py +0 -200
- pyxlpr/ppocr/postprocess/pg_postprocess.py +0 -52
- pyxlpr/ppocr/postprocess/pse_postprocess/__init__.py +0 -15
- pyxlpr/ppocr/postprocess/pse_postprocess/pse/__init__.py +0 -29
- pyxlpr/ppocr/postprocess/pse_postprocess/pse/setup.py +0 -14
- pyxlpr/ppocr/postprocess/pse_postprocess/pse_postprocess.py +0 -118
- pyxlpr/ppocr/postprocess/rec_postprocess.py +0 -654
- pyxlpr/ppocr/postprocess/sast_postprocess.py +0 -355
- pyxlpr/ppocr/tools/__init__.py +0 -14
- pyxlpr/ppocr/tools/eval.py +0 -83
- pyxlpr/ppocr/tools/export_center.py +0 -77
- pyxlpr/ppocr/tools/export_model.py +0 -129
- pyxlpr/ppocr/tools/infer/predict_cls.py +0 -151
- pyxlpr/ppocr/tools/infer/predict_det.py +0 -300
- pyxlpr/ppocr/tools/infer/predict_e2e.py +0 -169
- pyxlpr/ppocr/tools/infer/predict_rec.py +0 -414
- pyxlpr/ppocr/tools/infer/predict_system.py +0 -204
- pyxlpr/ppocr/tools/infer/utility.py +0 -629
- pyxlpr/ppocr/tools/infer_cls.py +0 -83
- pyxlpr/ppocr/tools/infer_det.py +0 -134
- pyxlpr/ppocr/tools/infer_e2e.py +0 -122
- pyxlpr/ppocr/tools/infer_kie.py +0 -153
- pyxlpr/ppocr/tools/infer_rec.py +0 -146
- pyxlpr/ppocr/tools/infer_table.py +0 -107
- pyxlpr/ppocr/tools/program.py +0 -596
- pyxlpr/ppocr/tools/test_hubserving.py +0 -117
- pyxlpr/ppocr/tools/train.py +0 -163
- pyxlpr/ppocr/tools/xlprog.py +0 -748
- pyxlpr/ppocr/utils/EN_symbol_dict.txt +0 -94
- pyxlpr/ppocr/utils/__init__.py +0 -24
- pyxlpr/ppocr/utils/dict/ar_dict.txt +0 -117
- pyxlpr/ppocr/utils/dict/arabic_dict.txt +0 -162
- pyxlpr/ppocr/utils/dict/be_dict.txt +0 -145
- pyxlpr/ppocr/utils/dict/bg_dict.txt +0 -140
- pyxlpr/ppocr/utils/dict/chinese_cht_dict.txt +0 -8421
- pyxlpr/ppocr/utils/dict/cyrillic_dict.txt +0 -163
- pyxlpr/ppocr/utils/dict/devanagari_dict.txt +0 -167
- pyxlpr/ppocr/utils/dict/en_dict.txt +0 -63
- pyxlpr/ppocr/utils/dict/fa_dict.txt +0 -136
- pyxlpr/ppocr/utils/dict/french_dict.txt +0 -136
- pyxlpr/ppocr/utils/dict/german_dict.txt +0 -143
- pyxlpr/ppocr/utils/dict/hi_dict.txt +0 -162
- pyxlpr/ppocr/utils/dict/it_dict.txt +0 -118
- pyxlpr/ppocr/utils/dict/japan_dict.txt +0 -4399
- pyxlpr/ppocr/utils/dict/ka_dict.txt +0 -153
- pyxlpr/ppocr/utils/dict/korean_dict.txt +0 -3688
- pyxlpr/ppocr/utils/dict/latin_dict.txt +0 -185
- pyxlpr/ppocr/utils/dict/mr_dict.txt +0 -153
- pyxlpr/ppocr/utils/dict/ne_dict.txt +0 -153
- pyxlpr/ppocr/utils/dict/oc_dict.txt +0 -96
- pyxlpr/ppocr/utils/dict/pu_dict.txt +0 -130
- pyxlpr/ppocr/utils/dict/rs_dict.txt +0 -91
- pyxlpr/ppocr/utils/dict/rsc_dict.txt +0 -134
- pyxlpr/ppocr/utils/dict/ru_dict.txt +0 -125
- pyxlpr/ppocr/utils/dict/ta_dict.txt +0 -128
- pyxlpr/ppocr/utils/dict/table_dict.txt +0 -277
- pyxlpr/ppocr/utils/dict/table_structure_dict.txt +0 -2759
- pyxlpr/ppocr/utils/dict/te_dict.txt +0 -151
- pyxlpr/ppocr/utils/dict/ug_dict.txt +0 -114
- pyxlpr/ppocr/utils/dict/uk_dict.txt +0 -142
- pyxlpr/ppocr/utils/dict/ur_dict.txt +0 -137
- pyxlpr/ppocr/utils/dict/xi_dict.txt +0 -110
- pyxlpr/ppocr/utils/dict90.txt +0 -90
- pyxlpr/ppocr/utils/e2e_metric/Deteval.py +0 -574
- pyxlpr/ppocr/utils/e2e_metric/polygon_fast.py +0 -83
- pyxlpr/ppocr/utils/e2e_utils/extract_batchsize.py +0 -87
- pyxlpr/ppocr/utils/e2e_utils/extract_textpoint_fast.py +0 -457
- pyxlpr/ppocr/utils/e2e_utils/extract_textpoint_slow.py +0 -592
- pyxlpr/ppocr/utils/e2e_utils/pgnet_pp_utils.py +0 -162
- pyxlpr/ppocr/utils/e2e_utils/visual.py +0 -162
- pyxlpr/ppocr/utils/en_dict.txt +0 -95
- pyxlpr/ppocr/utils/gen_label.py +0 -81
- pyxlpr/ppocr/utils/ic15_dict.txt +0 -36
- pyxlpr/ppocr/utils/iou.py +0 -54
- pyxlpr/ppocr/utils/logging.py +0 -69
- pyxlpr/ppocr/utils/network.py +0 -84
- pyxlpr/ppocr/utils/ppocr_keys_v1.txt +0 -6623
- pyxlpr/ppocr/utils/profiler.py +0 -110
- pyxlpr/ppocr/utils/save_load.py +0 -150
- pyxlpr/ppocr/utils/stats.py +0 -72
- pyxlpr/ppocr/utils/utility.py +0 -80
- pyxlpr/ppstructure/__init__.py +0 -13
- pyxlpr/ppstructure/predict_system.py +0 -187
- pyxlpr/ppstructure/table/__init__.py +0 -13
- pyxlpr/ppstructure/table/eval_table.py +0 -72
- pyxlpr/ppstructure/table/matcher.py +0 -192
- pyxlpr/ppstructure/table/predict_structure.py +0 -136
- pyxlpr/ppstructure/table/predict_table.py +0 -221
- pyxlpr/ppstructure/table/table_metric/__init__.py +0 -16
- pyxlpr/ppstructure/table/table_metric/parallel.py +0 -51
- pyxlpr/ppstructure/table/table_metric/table_metric.py +0 -247
- pyxlpr/ppstructure/table/tablepyxl/__init__.py +0 -13
- pyxlpr/ppstructure/table/tablepyxl/style.py +0 -283
- pyxlpr/ppstructure/table/tablepyxl/tablepyxl.py +0 -118
- pyxlpr/ppstructure/utility.py +0 -71
- pyxlpr/xlai.py +0 -10
- /pyxllib/{ext/autogui → autogui}/virtualkey.py +0 -0
- {pyxllib-0.3.96.dist-info → pyxllib-0.3.197.dist-info/licenses}/LICENSE +0 -0
pyxlpr/ai/specialist.py
DELETED
@@ -1,286 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# @Author : 陈坤泽
-# @Email : 877362867@qq.com
-# @Date : 2021/11/07 14:04
-
-import os
-import re
-import time
-
-from humanfriendly import format_size
-import pandas as pd
-import numpy as np
-
-from pyxllib.prog.pupil import check_install_package, run_once
-
-
-class ClasEvaluater:
-    def __init__(self, gt, pred, *, names=None):
-        """
-        :param names: optional mapping from class ids to human-readable names
-            list: names[id] gives the name for each id
-            dict: mapped by key lookup
-        """
-        assert len(gt) == len(pred), f'gt={len(gt)} and pred={len(pred)} sizes do not match'
-        self.gt, self.pred = gt, pred
-        self.total = len(gt)
-        self.names = names
-
-    @classmethod
-    def from_pairs(cls, pairs):
-        """
-        :param list|tuple pairs: a list [(y1, y_hat1), (y2, y_hat2), ...]
-            each element is itself a length-2 list, commonly [[0, 0], [1, 2], ...]
-                the first value is the ground-truth class y, the second the predicted class y_hat
-        """
-        gt, pred = list(zip(*pairs))  # most of the internal algorithms work on the two unpacked sequences
-        return cls(gt, pred)
-
-    def n_correct(self):
-        """ Number of correctly classified samples """
-        return sum([y == y_hat for y, y_hat in zip(self.gt, self.pred)])
-
-    def accuracy(self):
-        """ Overall accuracy (equivalent to f1_score with micro averaging) """
-        return round(self.n_correct() / self.total, 4)
-
-    def crosstab(self):
-        """ Cross table with per-class details """
-        # TODO map ids to readable names via names?
-        df = pd.DataFrame.from_dict({'gt': self.gt, 'pred': self.pred})
-        return pd.crosstab(df['gt'], df['pred'])
-
-    def f1_score(self, average='weighted'):
-        """ F1 score for multi-class tasks, see https://zhuanlan.zhihu.com/p/64315175
-
-        :param average:
-            weighted: compute f1 per class, then average weighted by sample count
-            macro: compute f1 per class, then take the plain mean (with imbalanced data, a class that appears only once can distort the result heavily)
-            micro: compute f1 over all samples as in binary classification, equivalent to accuracy
-            all: custom extension that returns a dict with all three results
-        """
-        check_install_package('sklearn', 'scikit-learn')
-        from sklearn.metrics import f1_score
-
-        if average == 'all':
-            return {f'f1_{k}': self.f1_score(k) for k in ('weighted', 'macro', 'micro')}
-        else:
-            return round(f1_score(self.gt, self.pred, average=average), 4)
-
-
-class ComputingReceptiveFields:
-    """ Tool for computing receptive field sizes
-    https://distill.pub/2019/computing-receptive-fields/#return-from-solving-receptive-field-size
-
-    Beyond the basic size computation implemented here, the article also covers exact spatial ranges, irregular shapes, etc.
-    """
-
-    @classmethod
-    def computing(cls, network):
-        """ Basic computation helper
-
-        network = [['Conv2D', 5, 1],
-                   ['MaxPool2D', 2, 2],
-                   ['Conv2D', 3, 1],
-                   ['MaxPool2D', 2, 2],
-                   ['Conv2D', 3, 1],
-                   ['MaxPool2D', 2, 2]]
-        df = computing(network)
-        """
-
-        # 0 basic configuration table, provided by the caller
-        columns = ['name', 'kernel_size', 'stride']
-        df = pd.DataFrame.from_records(network, columns=columns)
-
-        # 1 receptive field
-        n = len(df)
-        df['receptive'] = 1
-        for i in range(n - 2, -1, -1):
-            x = df.loc[i + 1]
-            df.loc[i, 'receptive'] = x['stride'] * x['receptive'] + (x['kernel_size'] - x['stride'])
-
-        return df
-
-    @classmethod
-    def from_paddle_layers(cls, network):
-        """ Covers a few common basic layer types for now; more complex cases can be added later if needed """
-
-        import paddle
-
-        ls = []
-        for x in network.sublayers():
-            if isinstance(x, paddle.nn.layer.conv.Conv2D):
-                ls.append(['Conv2D', x._kernel_size[0], x._stride[0]])
-            elif isinstance(x, paddle.nn.layer.pooling.MaxPool2D):
-                ls.append(['MaxPool2D', x.ksize, x.ksize])
-            else:  # other layer types are ignored for now
-                pass
-
-        return cls.computing(ls)
-
-
-def show_feature_map(feature_map, show=True, *, pading=5):
-    """ Display a feature map """
-    from pyxllib.xlcv import xlcv
-
-    a = np.array(feature_map)
-    a = a - a.min()
-    m = a.max()
-    if m:
-        a = (a / m) * 255
-    a = a.astype('uint8')
-
-    if a.ndim == 3:
-        a = xlcv.concat(list(a), pad=pading)
-    elif a.ndim == 4:
-        a = xlcv.concat([list(x) for x in a], pad=pading)
-
-    if show:
-        xlcv.show(a)
-
-    return a
-
-
-@run_once
-def _nvml_init():
-    check_install_package('pynvml')
-
-    import pynvml
-    pynvml.nvmlInit()
-
-
-class NvmDevice:
-    """
-    TODO add an interface for querying multiple cards
-    """
-
-    def __init__(self, *, set_cuda_visible=True):
-        """ Collect memory usage information for each gpu
-
-        :param set_cuda_visible: whether to renumber the gpus according to the CUDA_VISIBLE_DEVICES environment variable
-        """
-        _nvml_init()
-
-        import pynvml
-
-        records = []
-        columns = ['origin_id',  # original id
-                   'total',  # total memory
-                   'used',  # used memory
-                   'free']  # free memory
-
-        try:
-            # 2 absolute/relative index of each gpu card
-            if set_cuda_visible and 'CUDA_VISIBLE_DEVICES' in os.environ:
-                idxs = re.findall(r'\d+', os.environ['CUDA_VISIBLE_DEVICES'])
-                idxs = [int(v) for v in idxs]
-            else:
-                cuda_num = pynvml.nvmlDeviceGetCount()
-                idxs = list(range(cuda_num))  # if not restricted, query all cards
-
-            # 3 memory usage of each candidate gpu card
-            for i in idxs:
-                handle = pynvml.nvmlDeviceGetHandleByIndex(i)
-                meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
-                records.append([i, meminfo.total, meminfo.used, meminfo.free])
-        except (FileNotFoundError, pynvml.nvml.NVMLError_LibraryNotFound) as e:
-            # note: a missing nvml.dll does not necessarily mean there is no gpu card
-            pass
-
-        self.stat = pd.DataFrame.from_records(records, columns=columns)
-
-    def get_most_free_gpu_id(self, minimum_free_byte=-1, *, reverse=False):
-        """ Return the id of the gpu with the most free memory, or None if none qualifies
-
-        :param minimum_free_byte: minimum free bytes required; if no gpu has at least this much, return None
-        """
-        gpu_id, most_free = None, minimum_free_byte
-        for idx, row in self.stat.iterrows():
-            if row['free'] > most_free:
-                gpu_id, most_free = idx, row['free']
-            # personal habit: search from the back for the gpu with the most free memory, which also helps avoid card 0
-            # hence update on >= rather than only on >
-            if reverse and row['free'] >= most_free:
-                gpu_id, most_free = idx, row['free']
-        return gpu_id

-    def get_free_gpu_ids(self, minimum_free_byte=10 * 1024 ** 3):
-        """ Return the ids of all gpus with enough free memory
-
-        :param minimum_free_byte: by default at least 10 GB free is required
-        """
-        gpu_ids = []
-        for idx, row in self.stat.iterrows():
-            if row['free'] >= minimum_free_byte:
-                gpu_ids.append(idx)
-        return gpu_ids
-
-
-def auto_set_visible_device(reverse=False):
-    """ Automatically restrict visibility to a single card via the environment variable
-
-    CUDA_VISIBLE_DEVICES
-
-    On machines without a gpu this can automatically set CUDA_VISIBLE_DEVICES='' (empty)
-    """
-    name = 'CUDA_VISIBLE_DEVICES'
-    if name not in os.environ:
-        gpu_id = NvmDevice().get_most_free_gpu_id(reverse=reverse)
-        if gpu_id is not None:
-            os.environ[name] = str(gpu_id)
-    if name in os.environ:
-        print('{}={}'.format(name, os.environ[name]))
-
-
-def get_current_gpu_useage(card_id=None):
-    """ Query the current maximum gpu memory usage
-
-    :param card_id:
-        int, query a single card
-        str, comma-separated list of cards
-        list|tuple, query multiple cards
-        None, query all cards
-    """
-    _nvml_init()
-
-    import pynvml
-
-    # 1 which cards to check
-    if isinstance(card_id, int):
-        idxs = [card_id]
-    elif isinstance(card_id, str):
-        idxs = card_id.split(',')
-    elif isinstance(card_id, (list, tuple)):
-        idxs = card_id
-    else:
-        cuda_num = pynvml.nvmlDeviceGetCount()
-        idxs = list(range(cuda_num))  # if not restricted, query all cards
-
-    # 2 memory usage of each candidate gpu card
-    cur_max_memory = 0
-    for i in idxs:
-        handle = pynvml.nvmlDeviceGetHandleByIndex(i)
-        meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
-        cur_max_memory = max(cur_max_memory, meminfo.used)
-    return cur_max_memory
-
-
-def watch_gpu_maximun(card_id=None, interval_seconds=0.1):
-    """ Watch peak gpu memory usage
-
-    :param interval_seconds: polling interval
-    """
-    max_memory = 0
-    while True:
-        cur = get_current_gpu_useage(card_id)
-        max_memory = max(max_memory, cur)
-        print(f'\rmax_memory={format_size(max_memory, binary=True)} '
-              f'current_memory={format_size(cur, binary=True)}', end='')
-        time.sleep(interval_seconds)
-
-
-if __name__ == '__main__':
-    import fire
-
-    fire.Fire()
pyxlpr/ai/torch_app.py
DELETED
@@ -1,172 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# @Author : 陈坤泽
-# @Email : 877362867@qq.com
-# @Date : 2021/07/08 16:16
-
-import torch
-from torch import nn
-import torchvision
-
-from pyxllib.cv.xlcvlib import CvImg
-from pyxlpr.ai.xltorch import XlPredictor
-
-
-class ImageDirectionModelV2(nn.Module):
-    def __init__(self, n_classes=4):
-        super().__init__()
-
-        self.feature_extractor = nn.Sequential(
-            nn.Conv2d(1, 10, kernel_size=5, padding=2),
-            nn.BatchNorm2d(10, track_running_stats=False),
-            nn.ReLU(inplace=True),
-            nn.MaxPool2d(kernel_size=2),
-            nn.Conv2d(10, 5, kernel_size=3),
-            nn.BatchNorm2d(5, track_running_stats=False),
-            nn.ReLU(inplace=True),
-            nn.MaxPool2d(kernel_size=2),
-            nn.Conv2d(in_channels=5, out_channels=5, kernel_size=3),
-            nn.BatchNorm2d(5, track_running_stats=False),
-            nn.ReLU(inplace=True),
-            nn.MaxPool2d(kernel_size=2),
-        )
-
-        self.classifier = nn.Sequential(
-            nn.Linear(in_features=19220, out_features=n_classes),
-        )
-
-    def forward(self, x):
-        device = next(self.parameters()).device
-        x = x.to(device)
-        # dprint(x.shape)
-        x = self.feature_extractor(x)
-        x = torch.flatten(x, 1)
-        logits = self.classifier(x)
-
-        if self.training:
-            return logits
-        else:
-            return logits.argmax(dim=1)
-
-
-def get_imagedirection_predictor(state_file=None, device=None, batch_size=1):
-    """
-    :param state_file: weight file
-        downloaded automatically by default, so it can be omitted
-        a url is downloaded automatically
-        a local weight file can also be given
-
-        a weight file is bundled only because this task is fairly simple;
-        in more complex projects this could be made a required parameter
-
-        the bundled model is not very accurate, only 57% on an internal test set,
-        but it is small (about 300 KB) and fine for demos and testing
-    :param device: which device to run on
-        by default the gpu with the most free memory is chosen
-        falls back to cpu if no gpu is available
-        can also be set explicitly: 'cpu', 'cuda', 'cuda:1'
-    :param batch_size: how many samples can be recognized at once
-    :return: a classifier "function" that handles single or multiple images, given as paths, cv2 or pil images
-        return values
-            0, upright
-            1, rotated 90 degrees clockwise
-            2, rotated 180 degrees clockwise
-            3, rotated 270 degrees clockwise
-
-    Notes:
-        this is roughly a minimal interface example; more complex models may add configuration files for initialization, etc.
-
-    """
-    # 1 determine the local weight file path; if none is given, a preset model is downloaded from the web automatically
-    if state_file is None:
-        state_file = 'https://gitee.com/code4101/TestData/raw/master/ImageDirectionModelV2%20epoch=17.pth'
-
-    # 2 initialize the classifier (it does not have to be built on XlPredictor, but the final interface should stay as simple as get_imagedirection_func)
-    # the model structure can be initialized via a config file, but keep the interface as simple as possible
-    # weight initialization follows the automatic weight-file lookup above to simplify handover
-    # device is optional; by default a local device is selected automatically
-    # batch_size is optional, defaults to 1, and can be changed later on pred
-    pred = XlPredictor(ImageDirectionModelV2(), state_file, device=device, batch_size=batch_size)
-
-    # custom preprocessor, i.e. how each data item in pred(datas) is preprocessed before being passed to model.forward
-    def img_transform(arg):
-        # the argument can be a path, an opencv image or a pil image
-        # it is then converted to grayscale, resized and turned into a tensor
-        img = CvImg.read(arg, 0).resize2((512, 512))
-        return torchvision.transforms.functional.to_tensor(img)
-
-    pred.transform = img_transform
-
-    # pred.target_transform can additionally post-process the y_hat returned by model.forward
-
-    return pred
-
-
-class ContentTypeModel(nn.Module):
-    def __init__(self, n_classes=3):
-        super().__init__()
-
-        self.feature_extractor = nn.Sequential(
-            nn.Conv2d(3, 10, kernel_size=5, padding=2),
-            nn.BatchNorm2d(10, track_running_stats=False),
-            nn.ReLU(inplace=True),
-            nn.MaxPool2d(kernel_size=2),
-            nn.Conv2d(10, 5, kernel_size=3),
-            nn.BatchNorm2d(5, track_running_stats=False),
-            nn.ReLU(inplace=True),
-            nn.MaxPool2d(kernel_size=2),
-            nn.Conv2d(in_channels=5, out_channels=5, kernel_size=3),
-            nn.BatchNorm2d(5, track_running_stats=False),
-            nn.ReLU(inplace=True),
-            nn.MaxPool2d(kernel_size=2),
-        )
-
-        self.classifier = nn.Sequential(
-            nn.Linear(in_features=180, out_features=n_classes),
-        )
-
-    def forward(self, x):
-        device = next(self.parameters()).device
-        x = x.to(device)
-
-        x = self.feature_extractor(x)
-        x = torch.flatten(x, 1)
-        logits = self.classifier(x)
-
-        if self.training:
-            return logits
-        else:
-            return logits.argmax(dim=1)
-
-
-def get_contenttype_predictor(state_file=None, device=None, batch_size=1, text=None):
-    """
-
-    :param text: map result classes to strings
-
-    Returns:
-        0, handwriting
-        1, printed
-        2, seal
-
-    """
-    # 1 determine the local weight file path; if none is given, a preset model is downloaded from the web automatically
-    if state_file is None:
-        state_file = 'https://gitee.com/code4101/TestData/raw/master/ContentTypeModel%20epoch=15.pth'
-
-    # 2 initialize the classifier (it does not have to be built on XlPredictor, but the final interface should stay as simple as get_imagedirection_func)
-    pred = XlPredictor(ContentTypeModel(3), state_file, device=device, batch_size=batch_size)
-
-    # custom preprocessor, i.e. how each data item in pred(datas) is preprocessed before being passed to model.forward
-    def img_transform(arg):
-        # the argument can be a path, an opencv image or a pil image
-        # it is then read in color, resized and turned into a tensor
-        img = CvImg.read(arg, 1).resize2((64, 64))
-        return torchvision.transforms.functional.to_tensor(img)
-
-    pred.transform = img_transform
-
-    if text:
-        pred.target_transform = lambda idx: text[idx]
-
-    return pred