pyxllib 0.3.96__py3-none-any.whl → 0.3.200__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (358) hide show
  1. pyxllib/__init__.py +21 -21
  2. pyxllib/algo/__init__.py +8 -8
  3. pyxllib/algo/disjoint.py +54 -54
  4. pyxllib/algo/geo.py +541 -529
  5. pyxllib/algo/intervals.py +964 -964
  6. pyxllib/algo/matcher.py +389 -311
  7. pyxllib/algo/newbie.py +166 -166
  8. pyxllib/algo/pupil.py +629 -461
  9. pyxllib/algo/shapelylib.py +67 -67
  10. pyxllib/algo/specialist.py +241 -240
  11. pyxllib/algo/stat.py +494 -458
  12. pyxllib/algo/treelib.py +149 -149
  13. pyxllib/algo/unitlib.py +66 -66
  14. {pyxlpr → pyxllib/autogui}/__init__.py +5 -5
  15. pyxllib/autogui/activewin.py +246 -0
  16. pyxllib/autogui/all.py +9 -0
  17. pyxllib/{ext/autogui → autogui}/autogui.py +852 -823
  18. pyxllib/autogui/uiautolib.py +362 -0
  19. pyxllib/{ext/autogui → autogui}/virtualkey.py +102 -102
  20. pyxllib/autogui/wechat.py +827 -0
  21. pyxllib/autogui/wechat_msg.py +421 -0
  22. pyxllib/autogui/wxautolib.py +84 -0
  23. pyxllib/cv/__init__.py +5 -5
  24. pyxllib/cv/expert.py +267 -267
  25. pyxllib/cv/imfile.py +159 -159
  26. pyxllib/cv/imhash.py +39 -39
  27. pyxllib/cv/pupil.py +9 -9
  28. pyxllib/cv/rgbfmt.py +1525 -1525
  29. pyxllib/cv/slidercaptcha.py +137 -0
  30. pyxllib/cv/trackbartools.py +251 -251
  31. pyxllib/cv/xlcvlib.py +1040 -1040
  32. pyxllib/cv/xlpillib.py +423 -423
  33. pyxllib/data/echarts.py +240 -129
  34. pyxllib/data/jsonlib.py +89 -0
  35. pyxllib/data/oss.py +72 -72
  36. pyxllib/data/pglib.py +1127 -643
  37. pyxllib/data/sqlite.py +568 -341
  38. pyxllib/data/sqllib.py +297 -297
  39. pyxllib/ext/JLineViewer.py +505 -492
  40. pyxllib/ext/__init__.py +6 -6
  41. pyxllib/ext/demolib.py +246 -246
  42. pyxllib/ext/drissionlib.py +277 -0
  43. pyxllib/ext/kq5034lib.py +12 -1606
  44. pyxllib/ext/old.py +663 -663
  45. pyxllib/ext/qt.py +449 -449
  46. pyxllib/ext/robustprocfile.py +497 -0
  47. pyxllib/ext/seleniumlib.py +76 -76
  48. pyxllib/ext/tk.py +173 -173
  49. pyxllib/ext/unixlib.py +827 -826
  50. pyxllib/ext/utools.py +351 -338
  51. pyxllib/ext/webhook.py +124 -101
  52. pyxllib/ext/win32lib.py +40 -40
  53. pyxllib/ext/wjxlib.py +88 -0
  54. pyxllib/ext/wpsapi.py +124 -0
  55. pyxllib/ext/xlwork.py +9 -0
  56. pyxllib/ext/yuquelib.py +1105 -173
  57. pyxllib/file/__init__.py +17 -17
  58. pyxllib/file/docxlib.py +761 -761
  59. pyxllib/file/gitlib.py +309 -309
  60. pyxllib/file/libreoffice.py +165 -0
  61. pyxllib/file/movielib.py +148 -139
  62. pyxllib/file/newbie.py +10 -10
  63. pyxllib/file/onenotelib.py +1469 -1469
  64. pyxllib/file/packlib/__init__.py +330 -293
  65. pyxllib/file/packlib/zipfile.py +2441 -2441
  66. pyxllib/file/pdflib.py +426 -426
  67. pyxllib/file/pupil.py +185 -185
  68. pyxllib/file/specialist/__init__.py +685 -685
  69. pyxllib/file/specialist/dirlib.py +799 -799
  70. pyxllib/file/specialist/download.py +193 -186
  71. pyxllib/file/specialist/filelib.py +2829 -2618
  72. pyxllib/file/xlsxlib.py +3131 -2976
  73. pyxllib/file/xlsyncfile.py +341 -0
  74. pyxllib/prog/__init__.py +5 -5
  75. pyxllib/prog/cachetools.py +64 -0
  76. pyxllib/prog/deprecatedlib.py +233 -233
  77. pyxllib/prog/filelock.py +42 -0
  78. pyxllib/prog/ipyexec.py +253 -253
  79. pyxllib/prog/multiprogs.py +940 -0
  80. pyxllib/prog/newbie.py +451 -444
  81. pyxllib/prog/pupil.py +1197 -1128
  82. pyxllib/prog/sitepackages.py +33 -33
  83. pyxllib/prog/specialist/__init__.py +391 -217
  84. pyxllib/prog/specialist/bc.py +203 -200
  85. pyxllib/prog/specialist/browser.py +497 -488
  86. pyxllib/prog/specialist/common.py +347 -347
  87. pyxllib/prog/specialist/datetime.py +199 -131
  88. pyxllib/prog/specialist/tictoc.py +240 -241
  89. pyxllib/prog/specialist/xllog.py +180 -180
  90. pyxllib/prog/xlosenv.py +108 -101
  91. pyxllib/stdlib/__init__.py +17 -17
  92. pyxllib/stdlib/tablepyxl/__init__.py +10 -10
  93. pyxllib/stdlib/tablepyxl/style.py +303 -303
  94. pyxllib/stdlib/tablepyxl/tablepyxl.py +130 -130
  95. pyxllib/text/__init__.py +8 -8
  96. pyxllib/text/ahocorasick.py +39 -39
  97. pyxllib/text/airscript.js +744 -0
  98. pyxllib/text/charclasslib.py +121 -109
  99. pyxllib/text/jiebalib.py +267 -264
  100. pyxllib/text/jinjalib.py +32 -0
  101. pyxllib/text/jsa_ai_prompt.md +271 -0
  102. pyxllib/text/jscode.py +922 -767
  103. pyxllib/text/latex/__init__.py +158 -158
  104. pyxllib/text/levenshtein.py +303 -303
  105. pyxllib/text/nestenv.py +1215 -1215
  106. pyxllib/text/newbie.py +300 -288
  107. pyxllib/text/pupil/__init__.py +8 -8
  108. pyxllib/text/pupil/common.py +1121 -1095
  109. pyxllib/text/pupil/xlalign.py +326 -326
  110. pyxllib/text/pycode.py +47 -47
  111. pyxllib/text/specialist/__init__.py +8 -8
  112. pyxllib/text/specialist/common.py +112 -112
  113. pyxllib/text/specialist/ptag.py +186 -186
  114. pyxllib/text/spellchecker.py +172 -172
  115. pyxllib/text/templates/echart_base.html +11 -0
  116. pyxllib/text/templates/highlight_code.html +17 -0
  117. pyxllib/text/templates/latex_editor.html +103 -0
  118. pyxllib/text/vbacode.py +17 -17
  119. pyxllib/text/xmllib.py +747 -685
  120. pyxllib/xl.py +42 -38
  121. pyxllib/xlcv.py +17 -17
  122. pyxllib-0.3.200.dist-info/METADATA +48 -0
  123. pyxllib-0.3.200.dist-info/RECORD +126 -0
  124. {pyxllib-0.3.96.dist-info → pyxllib-0.3.200.dist-info}/WHEEL +1 -2
  125. {pyxllib-0.3.96.dist-info → pyxllib-0.3.200.dist-info/licenses}/LICENSE +190 -190
  126. pyxllib/ext/autogui/__init__.py +0 -8
  127. pyxllib-0.3.96.dist-info/METADATA +0 -51
  128. pyxllib-0.3.96.dist-info/RECORD +0 -333
  129. pyxllib-0.3.96.dist-info/top_level.txt +0 -2
  130. pyxlpr/ai/__init__.py +0 -5
  131. pyxlpr/ai/clientlib.py +0 -1281
  132. pyxlpr/ai/specialist.py +0 -286
  133. pyxlpr/ai/torch_app.py +0 -172
  134. pyxlpr/ai/xlpaddle.py +0 -655
  135. pyxlpr/ai/xltorch.py +0 -705
  136. pyxlpr/data/__init__.py +0 -11
  137. pyxlpr/data/coco.py +0 -1325
  138. pyxlpr/data/datacls.py +0 -365
  139. pyxlpr/data/datasets.py +0 -200
  140. pyxlpr/data/gptlib.py +0 -1291
  141. pyxlpr/data/icdar/__init__.py +0 -96
  142. pyxlpr/data/icdar/deteval.py +0 -377
  143. pyxlpr/data/icdar/icdar2013.py +0 -341
  144. pyxlpr/data/icdar/iou.py +0 -340
  145. pyxlpr/data/icdar/rrc_evaluation_funcs_1_1.py +0 -463
  146. pyxlpr/data/imtextline.py +0 -473
  147. pyxlpr/data/labelme.py +0 -866
  148. pyxlpr/data/removeline.py +0 -179
  149. pyxlpr/data/specialist.py +0 -57
  150. pyxlpr/eval/__init__.py +0 -85
  151. pyxlpr/paddleocr.py +0 -776
  152. pyxlpr/ppocr/__init__.py +0 -15
  153. pyxlpr/ppocr/configs/rec/multi_language/generate_multi_language_configs.py +0 -226
  154. pyxlpr/ppocr/data/__init__.py +0 -135
  155. pyxlpr/ppocr/data/imaug/ColorJitter.py +0 -26
  156. pyxlpr/ppocr/data/imaug/__init__.py +0 -67
  157. pyxlpr/ppocr/data/imaug/copy_paste.py +0 -170
  158. pyxlpr/ppocr/data/imaug/east_process.py +0 -437
  159. pyxlpr/ppocr/data/imaug/gen_table_mask.py +0 -244
  160. pyxlpr/ppocr/data/imaug/iaa_augment.py +0 -114
  161. pyxlpr/ppocr/data/imaug/label_ops.py +0 -789
  162. pyxlpr/ppocr/data/imaug/make_border_map.py +0 -184
  163. pyxlpr/ppocr/data/imaug/make_pse_gt.py +0 -106
  164. pyxlpr/ppocr/data/imaug/make_shrink_map.py +0 -126
  165. pyxlpr/ppocr/data/imaug/operators.py +0 -433
  166. pyxlpr/ppocr/data/imaug/pg_process.py +0 -906
  167. pyxlpr/ppocr/data/imaug/randaugment.py +0 -143
  168. pyxlpr/ppocr/data/imaug/random_crop_data.py +0 -239
  169. pyxlpr/ppocr/data/imaug/rec_img_aug.py +0 -533
  170. pyxlpr/ppocr/data/imaug/sast_process.py +0 -777
  171. pyxlpr/ppocr/data/imaug/text_image_aug/__init__.py +0 -17
  172. pyxlpr/ppocr/data/imaug/text_image_aug/augment.py +0 -120
  173. pyxlpr/ppocr/data/imaug/text_image_aug/warp_mls.py +0 -168
  174. pyxlpr/ppocr/data/lmdb_dataset.py +0 -115
  175. pyxlpr/ppocr/data/pgnet_dataset.py +0 -104
  176. pyxlpr/ppocr/data/pubtab_dataset.py +0 -107
  177. pyxlpr/ppocr/data/simple_dataset.py +0 -372
  178. pyxlpr/ppocr/losses/__init__.py +0 -61
  179. pyxlpr/ppocr/losses/ace_loss.py +0 -52
  180. pyxlpr/ppocr/losses/basic_loss.py +0 -135
  181. pyxlpr/ppocr/losses/center_loss.py +0 -88
  182. pyxlpr/ppocr/losses/cls_loss.py +0 -30
  183. pyxlpr/ppocr/losses/combined_loss.py +0 -67
  184. pyxlpr/ppocr/losses/det_basic_loss.py +0 -208
  185. pyxlpr/ppocr/losses/det_db_loss.py +0 -80
  186. pyxlpr/ppocr/losses/det_east_loss.py +0 -63
  187. pyxlpr/ppocr/losses/det_pse_loss.py +0 -149
  188. pyxlpr/ppocr/losses/det_sast_loss.py +0 -121
  189. pyxlpr/ppocr/losses/distillation_loss.py +0 -272
  190. pyxlpr/ppocr/losses/e2e_pg_loss.py +0 -140
  191. pyxlpr/ppocr/losses/kie_sdmgr_loss.py +0 -113
  192. pyxlpr/ppocr/losses/rec_aster_loss.py +0 -99
  193. pyxlpr/ppocr/losses/rec_att_loss.py +0 -39
  194. pyxlpr/ppocr/losses/rec_ctc_loss.py +0 -44
  195. pyxlpr/ppocr/losses/rec_enhanced_ctc_loss.py +0 -70
  196. pyxlpr/ppocr/losses/rec_nrtr_loss.py +0 -30
  197. pyxlpr/ppocr/losses/rec_sar_loss.py +0 -28
  198. pyxlpr/ppocr/losses/rec_srn_loss.py +0 -47
  199. pyxlpr/ppocr/losses/table_att_loss.py +0 -109
  200. pyxlpr/ppocr/metrics/__init__.py +0 -44
  201. pyxlpr/ppocr/metrics/cls_metric.py +0 -45
  202. pyxlpr/ppocr/metrics/det_metric.py +0 -82
  203. pyxlpr/ppocr/metrics/distillation_metric.py +0 -73
  204. pyxlpr/ppocr/metrics/e2e_metric.py +0 -86
  205. pyxlpr/ppocr/metrics/eval_det_iou.py +0 -274
  206. pyxlpr/ppocr/metrics/kie_metric.py +0 -70
  207. pyxlpr/ppocr/metrics/rec_metric.py +0 -75
  208. pyxlpr/ppocr/metrics/table_metric.py +0 -50
  209. pyxlpr/ppocr/modeling/architectures/__init__.py +0 -32
  210. pyxlpr/ppocr/modeling/architectures/base_model.py +0 -88
  211. pyxlpr/ppocr/modeling/architectures/distillation_model.py +0 -60
  212. pyxlpr/ppocr/modeling/backbones/__init__.py +0 -54
  213. pyxlpr/ppocr/modeling/backbones/det_mobilenet_v3.py +0 -268
  214. pyxlpr/ppocr/modeling/backbones/det_resnet_vd.py +0 -246
  215. pyxlpr/ppocr/modeling/backbones/det_resnet_vd_sast.py +0 -285
  216. pyxlpr/ppocr/modeling/backbones/e2e_resnet_vd_pg.py +0 -265
  217. pyxlpr/ppocr/modeling/backbones/kie_unet_sdmgr.py +0 -186
  218. pyxlpr/ppocr/modeling/backbones/rec_mobilenet_v3.py +0 -138
  219. pyxlpr/ppocr/modeling/backbones/rec_mv1_enhance.py +0 -258
  220. pyxlpr/ppocr/modeling/backbones/rec_nrtr_mtb.py +0 -48
  221. pyxlpr/ppocr/modeling/backbones/rec_resnet_31.py +0 -210
  222. pyxlpr/ppocr/modeling/backbones/rec_resnet_aster.py +0 -143
  223. pyxlpr/ppocr/modeling/backbones/rec_resnet_fpn.py +0 -307
  224. pyxlpr/ppocr/modeling/backbones/rec_resnet_vd.py +0 -286
  225. pyxlpr/ppocr/modeling/heads/__init__.py +0 -54
  226. pyxlpr/ppocr/modeling/heads/cls_head.py +0 -52
  227. pyxlpr/ppocr/modeling/heads/det_db_head.py +0 -118
  228. pyxlpr/ppocr/modeling/heads/det_east_head.py +0 -121
  229. pyxlpr/ppocr/modeling/heads/det_pse_head.py +0 -37
  230. pyxlpr/ppocr/modeling/heads/det_sast_head.py +0 -128
  231. pyxlpr/ppocr/modeling/heads/e2e_pg_head.py +0 -253
  232. pyxlpr/ppocr/modeling/heads/kie_sdmgr_head.py +0 -206
  233. pyxlpr/ppocr/modeling/heads/multiheadAttention.py +0 -163
  234. pyxlpr/ppocr/modeling/heads/rec_aster_head.py +0 -393
  235. pyxlpr/ppocr/modeling/heads/rec_att_head.py +0 -202
  236. pyxlpr/ppocr/modeling/heads/rec_ctc_head.py +0 -88
  237. pyxlpr/ppocr/modeling/heads/rec_nrtr_head.py +0 -826
  238. pyxlpr/ppocr/modeling/heads/rec_sar_head.py +0 -402
  239. pyxlpr/ppocr/modeling/heads/rec_srn_head.py +0 -280
  240. pyxlpr/ppocr/modeling/heads/self_attention.py +0 -406
  241. pyxlpr/ppocr/modeling/heads/table_att_head.py +0 -246
  242. pyxlpr/ppocr/modeling/necks/__init__.py +0 -32
  243. pyxlpr/ppocr/modeling/necks/db_fpn.py +0 -111
  244. pyxlpr/ppocr/modeling/necks/east_fpn.py +0 -188
  245. pyxlpr/ppocr/modeling/necks/fpn.py +0 -138
  246. pyxlpr/ppocr/modeling/necks/pg_fpn.py +0 -314
  247. pyxlpr/ppocr/modeling/necks/rnn.py +0 -92
  248. pyxlpr/ppocr/modeling/necks/sast_fpn.py +0 -284
  249. pyxlpr/ppocr/modeling/necks/table_fpn.py +0 -110
  250. pyxlpr/ppocr/modeling/transforms/__init__.py +0 -28
  251. pyxlpr/ppocr/modeling/transforms/stn.py +0 -135
  252. pyxlpr/ppocr/modeling/transforms/tps.py +0 -308
  253. pyxlpr/ppocr/modeling/transforms/tps_spatial_transformer.py +0 -156
  254. pyxlpr/ppocr/optimizer/__init__.py +0 -61
  255. pyxlpr/ppocr/optimizer/learning_rate.py +0 -228
  256. pyxlpr/ppocr/optimizer/lr_scheduler.py +0 -49
  257. pyxlpr/ppocr/optimizer/optimizer.py +0 -160
  258. pyxlpr/ppocr/optimizer/regularizer.py +0 -52
  259. pyxlpr/ppocr/postprocess/__init__.py +0 -55
  260. pyxlpr/ppocr/postprocess/cls_postprocess.py +0 -33
  261. pyxlpr/ppocr/postprocess/db_postprocess.py +0 -234
  262. pyxlpr/ppocr/postprocess/east_postprocess.py +0 -143
  263. pyxlpr/ppocr/postprocess/locality_aware_nms.py +0 -200
  264. pyxlpr/ppocr/postprocess/pg_postprocess.py +0 -52
  265. pyxlpr/ppocr/postprocess/pse_postprocess/__init__.py +0 -15
  266. pyxlpr/ppocr/postprocess/pse_postprocess/pse/__init__.py +0 -29
  267. pyxlpr/ppocr/postprocess/pse_postprocess/pse/setup.py +0 -14
  268. pyxlpr/ppocr/postprocess/pse_postprocess/pse_postprocess.py +0 -118
  269. pyxlpr/ppocr/postprocess/rec_postprocess.py +0 -654
  270. pyxlpr/ppocr/postprocess/sast_postprocess.py +0 -355
  271. pyxlpr/ppocr/tools/__init__.py +0 -14
  272. pyxlpr/ppocr/tools/eval.py +0 -83
  273. pyxlpr/ppocr/tools/export_center.py +0 -77
  274. pyxlpr/ppocr/tools/export_model.py +0 -129
  275. pyxlpr/ppocr/tools/infer/predict_cls.py +0 -151
  276. pyxlpr/ppocr/tools/infer/predict_det.py +0 -300
  277. pyxlpr/ppocr/tools/infer/predict_e2e.py +0 -169
  278. pyxlpr/ppocr/tools/infer/predict_rec.py +0 -414
  279. pyxlpr/ppocr/tools/infer/predict_system.py +0 -204
  280. pyxlpr/ppocr/tools/infer/utility.py +0 -629
  281. pyxlpr/ppocr/tools/infer_cls.py +0 -83
  282. pyxlpr/ppocr/tools/infer_det.py +0 -134
  283. pyxlpr/ppocr/tools/infer_e2e.py +0 -122
  284. pyxlpr/ppocr/tools/infer_kie.py +0 -153
  285. pyxlpr/ppocr/tools/infer_rec.py +0 -146
  286. pyxlpr/ppocr/tools/infer_table.py +0 -107
  287. pyxlpr/ppocr/tools/program.py +0 -596
  288. pyxlpr/ppocr/tools/test_hubserving.py +0 -117
  289. pyxlpr/ppocr/tools/train.py +0 -163
  290. pyxlpr/ppocr/tools/xlprog.py +0 -748
  291. pyxlpr/ppocr/utils/EN_symbol_dict.txt +0 -94
  292. pyxlpr/ppocr/utils/__init__.py +0 -24
  293. pyxlpr/ppocr/utils/dict/ar_dict.txt +0 -117
  294. pyxlpr/ppocr/utils/dict/arabic_dict.txt +0 -162
  295. pyxlpr/ppocr/utils/dict/be_dict.txt +0 -145
  296. pyxlpr/ppocr/utils/dict/bg_dict.txt +0 -140
  297. pyxlpr/ppocr/utils/dict/chinese_cht_dict.txt +0 -8421
  298. pyxlpr/ppocr/utils/dict/cyrillic_dict.txt +0 -163
  299. pyxlpr/ppocr/utils/dict/devanagari_dict.txt +0 -167
  300. pyxlpr/ppocr/utils/dict/en_dict.txt +0 -63
  301. pyxlpr/ppocr/utils/dict/fa_dict.txt +0 -136
  302. pyxlpr/ppocr/utils/dict/french_dict.txt +0 -136
  303. pyxlpr/ppocr/utils/dict/german_dict.txt +0 -143
  304. pyxlpr/ppocr/utils/dict/hi_dict.txt +0 -162
  305. pyxlpr/ppocr/utils/dict/it_dict.txt +0 -118
  306. pyxlpr/ppocr/utils/dict/japan_dict.txt +0 -4399
  307. pyxlpr/ppocr/utils/dict/ka_dict.txt +0 -153
  308. pyxlpr/ppocr/utils/dict/korean_dict.txt +0 -3688
  309. pyxlpr/ppocr/utils/dict/latin_dict.txt +0 -185
  310. pyxlpr/ppocr/utils/dict/mr_dict.txt +0 -153
  311. pyxlpr/ppocr/utils/dict/ne_dict.txt +0 -153
  312. pyxlpr/ppocr/utils/dict/oc_dict.txt +0 -96
  313. pyxlpr/ppocr/utils/dict/pu_dict.txt +0 -130
  314. pyxlpr/ppocr/utils/dict/rs_dict.txt +0 -91
  315. pyxlpr/ppocr/utils/dict/rsc_dict.txt +0 -134
  316. pyxlpr/ppocr/utils/dict/ru_dict.txt +0 -125
  317. pyxlpr/ppocr/utils/dict/ta_dict.txt +0 -128
  318. pyxlpr/ppocr/utils/dict/table_dict.txt +0 -277
  319. pyxlpr/ppocr/utils/dict/table_structure_dict.txt +0 -2759
  320. pyxlpr/ppocr/utils/dict/te_dict.txt +0 -151
  321. pyxlpr/ppocr/utils/dict/ug_dict.txt +0 -114
  322. pyxlpr/ppocr/utils/dict/uk_dict.txt +0 -142
  323. pyxlpr/ppocr/utils/dict/ur_dict.txt +0 -137
  324. pyxlpr/ppocr/utils/dict/xi_dict.txt +0 -110
  325. pyxlpr/ppocr/utils/dict90.txt +0 -90
  326. pyxlpr/ppocr/utils/e2e_metric/Deteval.py +0 -574
  327. pyxlpr/ppocr/utils/e2e_metric/polygon_fast.py +0 -83
  328. pyxlpr/ppocr/utils/e2e_utils/extract_batchsize.py +0 -87
  329. pyxlpr/ppocr/utils/e2e_utils/extract_textpoint_fast.py +0 -457
  330. pyxlpr/ppocr/utils/e2e_utils/extract_textpoint_slow.py +0 -592
  331. pyxlpr/ppocr/utils/e2e_utils/pgnet_pp_utils.py +0 -162
  332. pyxlpr/ppocr/utils/e2e_utils/visual.py +0 -162
  333. pyxlpr/ppocr/utils/en_dict.txt +0 -95
  334. pyxlpr/ppocr/utils/gen_label.py +0 -81
  335. pyxlpr/ppocr/utils/ic15_dict.txt +0 -36
  336. pyxlpr/ppocr/utils/iou.py +0 -54
  337. pyxlpr/ppocr/utils/logging.py +0 -69
  338. pyxlpr/ppocr/utils/network.py +0 -84
  339. pyxlpr/ppocr/utils/ppocr_keys_v1.txt +0 -6623
  340. pyxlpr/ppocr/utils/profiler.py +0 -110
  341. pyxlpr/ppocr/utils/save_load.py +0 -150
  342. pyxlpr/ppocr/utils/stats.py +0 -72
  343. pyxlpr/ppocr/utils/utility.py +0 -80
  344. pyxlpr/ppstructure/__init__.py +0 -13
  345. pyxlpr/ppstructure/predict_system.py +0 -187
  346. pyxlpr/ppstructure/table/__init__.py +0 -13
  347. pyxlpr/ppstructure/table/eval_table.py +0 -72
  348. pyxlpr/ppstructure/table/matcher.py +0 -192
  349. pyxlpr/ppstructure/table/predict_structure.py +0 -136
  350. pyxlpr/ppstructure/table/predict_table.py +0 -221
  351. pyxlpr/ppstructure/table/table_metric/__init__.py +0 -16
  352. pyxlpr/ppstructure/table/table_metric/parallel.py +0 -51
  353. pyxlpr/ppstructure/table/table_metric/table_metric.py +0 -247
  354. pyxlpr/ppstructure/table/tablepyxl/__init__.py +0 -13
  355. pyxlpr/ppstructure/table/tablepyxl/style.py +0 -283
  356. pyxlpr/ppstructure/table/tablepyxl/tablepyxl.py +0 -118
  357. pyxlpr/ppstructure/utility.py +0 -71
  358. pyxlpr/xlai.py +0 -10
@@ -0,0 +1,497 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # @Author : 陈坤泽
4
+ # @Email : 877362867@qq.com
5
+ # @Date : 2024/03/28
6
+
7
+ """
8
+ 处理数据文件常用的拆分逻辑
9
+
10
+ 半定制化的功能组件
11
+ """
12
+
13
+ from pyxllib.prog.pupil import check_install_package
14
+
15
+ check_install_package('joblib')
16
+
17
+ from collections import defaultdict, Counter
18
+ import datetime
19
+ import re
20
+ import json
21
+
22
+ from tqdm import tqdm
23
+ from joblib import Parallel, delayed
24
+
25
+ from pyxllib.prog.pupil import check_counter, tprint, typename
26
+ from pyxllib.file.specialist import XlPath, refinepath
27
+
28
+
29
def get_timestamp(fmt='%Y%m%d%H%M%S'):
    """Return the current local time formatted with *fmt* (default: compact 14-digit form)."""
    now = datetime.datetime.now()
    return now.strftime(fmt)
31
+
32
+
33
def process_file_base(file_func, file, correct_tag, error_tag,
                      *,
                      correct_dir=None, error_dir=None,
                      correct_file_name=None, error_file_name=None,
                      split_group_func=None,
                      reset=False):
    """ Process one data file, writing the "correct" and "error" results to two sibling files.

    :param file_func: custom processing function applied to the file.
        May return either one value (correct data only) or a 2-tuple
        (correct_data, error_data). The data may be list[str], list[dict], etc.
    :param file: input file path (converted to XlPath below).
    :param correct_tag: tag embedded in the output file name for good records.
    :param error_tag: tag embedded in the output file name for bad records.
    :param correct_dir: output dir for good records; defaults to the input file's parent.
    :param error_dir: output dir for bad records; defaults to the input file's parent.
    :param correct_file_name: explicit output name for good records; when None an
        auto name '<stem>_<tag><count>_<timestamp><suffix>' is generated.
        NOTE(review): when split_group_func is given this name is ignored —
        each group always gets an auto-generated name.
    :param error_file_name: explicit output name for bad records (same auto rule).
    :param split_group_func: optional key function; good records are partitioned
        by its return value and each group written to its own file.
    :param reset: when True, delete older outputs that differ only by timestamp
        before writing.
    """

    # 0 helper functions
    def remove_old_file(new_file):
        # Delete stale outputs in the same directory whose name differs from
        # new_file only by the trailing 14-digit timestamp.
        stem = re.sub(r'_\d{14}$', '', new_file.stem)
        fmt_str = fr'{stem}_\d{{14}}{new_file.suffix}'
        for f in new_file.parent.glob(f'{stem}_*{new_file.suffix}'):
            if re.match(fmt_str, f.name):
                f.delete()

    def write_file(_dir, name, tag, data):
        # Write data to _dir/name; auto-generate the name when not given.
        # list[str] keeps the input suffix and is joined by newlines;
        # anything else is serialized as jsonl.
        if name is None:
            suffix = file.suffix if isinstance(data[0], str) else '.jsonl'
            name = f'{file.stem}_{tag}{len(data)}_{get_timestamp()}{suffix}'
        _file = _dir / name
        _file.parent.mkdir(exist_ok=True, parents=True)
        if reset:  # drop old timestamp-variant files before writing the new one
            remove_old_file(_file)

        if isinstance(data[0], str):
            _file.write_text('\n'.join(data))
        else:
            _file.write_jsonl(data)

    # 1 run the processing function
    file = XlPath(file)
    res = file_func(file)
    if isinstance(res, tuple) and len(res) == 2:
        correct_data, error_data = res
    else:
        # Single return value means there were no error records.
        correct_data = res
        error_data = None

    # 2 compute output names and write results
    if correct_data:
        correct_dir = file.parent if correct_dir is None else XlPath(correct_dir)
        suffix = file.suffix if isinstance(correct_data[0], str) else '.jsonl'
        if split_group_func:  # NOTE(review): correct_file_name is not honored in this branch
            group_data = defaultdict(list)
            for x in correct_data:
                group_data[split_group_func(x)].append(x)

            for k, correct_data in group_data.items():
                # One output file per group; group key is sanitized via refinepath.
                correct_file_name2 = (f'{file.stem}_{refinepath(k)}_'
                                      f'{correct_tag}{len(correct_data)}_{get_timestamp()}{suffix}')
                write_file(correct_dir, correct_file_name2, correct_tag, correct_data)
        else:
            write_file(correct_dir, correct_file_name, correct_tag, correct_data)
    if error_data:
        error_dir = file.parent if error_dir is None else XlPath(error_dir)
        write_file(error_dir, error_file_name, error_tag, error_data)
97
+
98
+
99
def process_dir_base(file_func, dir_path, correct_tag, error_tag,
                     *, pattern='*', correct_dir=None, error_dir=None, reset=False,
                     **kwargs):
    """ Run process_file_base on every file under dir_path that matches pattern.

    Output directories default to siblings of dir_path named
    '<dir>_<correct_tag>' and '<dir>_<error_tag>'.
    """
    dir_path = XlPath(dir_path)
    if correct_dir:
        correct_dir = XlPath(correct_dir)
    else:
        correct_dir = dir_path.parent / f'{dir_path.name}_{correct_tag}'
    if error_dir:
        error_dir = XlPath(error_dir)
    else:
        error_dir = dir_path.parent / f'{dir_path.name}_{error_tag}'

    files = list(dir_path.rglob(pattern))
    for idx, file in enumerate(files, start=1):
        # Progress line before delegating each file.
        tprint(f'处理完第{idx}/{len(files)}个文件: {file.name} ==> {correct_tag}')
        process_file_base(file_func, file, correct_tag, error_tag,
                          correct_dir=correct_dir, error_dir=error_dir, reset=reset,
                          **kwargs)
114
+
115
+
116
def process_path(file_func, tag, path, **kwargs):
    """ Convenience wrapper: dispatch to single-file or whole-directory processing. """
    path = XlPath(path)

    # Command-line style callers may pass the function by its name.
    if isinstance(file_func, str):
        file_func = globals()[file_func]

    # Some special kwargs are reserved for the processing function itself,
    # not for the file/dir driver.
    func_args = {}
    if 'remove_repeat_mode' in kwargs:
        func_args['remove_repeat_mode'] = kwargs.pop('remove_repeat_mode')

    file_func2 = (lambda x: file_func(x, **func_args)) if func_args else file_func

    if path.is_file():
        process_file_base(file_func2, path, tag, f'{tag}error', **kwargs)
    elif path.is_dir():
        process_dir_base(file_func2, path, tag, f'{tag}error', **kwargs)
137
+
138
+
139
class Analyzer:
    """ Lightweight analysis helper for debugging: keeps per-tag frequency counters
    without affecting the main processing logic. """

    def __init__(self):
        # One Counter per tag (group name).
        self.counters = defaultdict(Counter)

    def count_once(self, tag, key):
        """ Record one occurrence of *key* under the *tag* group. """
        self.counters[tag][key] += 1

    def check_counter(self):
        """ Print the distribution of every recorded group. """
        for name, ct in self.counters.items():
            print(f'【{name}】')
            check_counter(ct)
154
+
155
+
156
def head_data(infile, num=1000, file_size=50):
    """ Take the leading part of infile.

    :param num: at most this many lines.
    :param file_size: additionally cap the total size at roughly file_size MB.
        This is an approximation (character count, not encoded bytes), kept
        deliberately simple.
    :return: list of lines.
    """
    lines = []
    size_limit = file_size * 1024 * 1024
    used = 0
    for line in XlPath(infile).yield_line(end=num):
        used += len(line)
        if used > size_limit:
            break
        lines.append(line)

    return lines
172
+
173
+
174
def remove_repeat_base(infile,
                       get_etags_func,
                       exists_etags=set(),  # trick: deliberately mutable so seen etags persist across calls
                       ):
    """ Remove duplicate records from a jsonl file (base routine).

    :param infile: jsonl file to deduplicate.
    :param get_etags_func: returns the etag(s) of a record used for duplicate
        detection; may return one string or a list of strings.
    :param exists_etags: shared set of already-seen etags. The mutable default
        is intentional (see comment) so repeated calls deduplicate globally.
    :return: list of unique records.
    """
    data = XlPath(infile).read_jsonl()
    src_len = len(data)

    data2 = []
    for x in data:
        etags = get_etags_func(x)
        if isinstance(etags, str):
            etags = [etags]
        if any(etag in exists_etags for etag in etags):
            continue
        data2.append(x)
        exists_etags.update(set(etags))

    new_len = len(data2)
    # Fix: guard against an empty input file (previously raised ZeroDivisionError).
    ratio = new_len / src_len if src_len else 0
    print(f'去重后剩余 {new_len}/{src_len} ≈ {ratio:.2%} 的数据')
    return data2
200
+
201
+
202
class CacheJsonlFile:
    """ Streaming jsonl writer: buffers records and appends them to a cache file
    in batches, then renames the cache to its final timestamped name. """

    def __init__(self, parent_dir, prefix_stem, tag, batch_size=2000):
        # Fix: normalize to XlPath so flush() can call .mkdir() even when the
        # caller passes a plain str path (the original stored the raw argument).
        self.parent_dir = XlPath(parent_dir)
        self.prefix_stem = prefix_stem
        self.tag = tag
        self.cache_text_lines = []  # pending, not-yet-flushed lines
        self.batch_size = batch_size

        # File name used while writing is still in progress.
        self.cache_file = self.parent_dir / f'{prefix_stem}_{tag}.cache.jsonl'
        self.total = 0  # number of lines flushed so far

    def append(self, data):
        """ Queue records; strings pass through as-is, others are json-encoded. """
        for x in data:
            if isinstance(x, str):
                self.cache_text_lines.append(x)
            else:
                self.cache_text_lines.append(json.dumps(x, ensure_ascii=False))
        if len(self.cache_text_lines) >= self.batch_size:
            self.flush()

    def flush(self):
        """ Write the current buffer to the cache file. """
        if self.cache_text_lines:
            if self.total == 0:  # first write: drop any stale cache file
                self.cache_file.delete()

            self.total += len(self.cache_text_lines)
            self.parent_dir.mkdir(exist_ok=True, parents=True)
            with open(self.cache_file, 'a', encoding='utf8') as f:
                f.write('\n'.join(self.cache_text_lines) + '\n')
            self.cache_text_lines = []

    def save_all(self):
        """ Flush and rename the cache file to its final name (with count + timestamp). """
        self.flush()
        if self.cache_file.is_file():
            dst_file = self.cache_file.with_stem(f'{self.prefix_stem}_{self.tag}{self.total}_{get_timestamp()}')
            self.cache_file.rename(dst_file)
243
+
244
+
245
class CacheJsonlGroupFiles:
    """ Routes records to per-group CacheJsonlFile writers, creating writers lazily. """

    def __init__(self, parent_dir, prefix_stem, tag, group_func=None, batch_size=2000):
        self.files = {}  # group key -> CacheJsonlFile

        self.parent_dir = parent_dir
        self.prefix_stem = prefix_stem
        self.tag = tag
        self.group_func = group_func
        self.batch_size = batch_size

    def append(self, data):
        """ Partition data by group_func (if any) and forward each bucket to its writer. """
        buckets = defaultdict(list)
        if self.group_func:
            for item in data:
                buckets[self.group_func(item)].append(item)
        else:
            buckets[''] = data

        for key, items in buckets.items():
            writer = self.files.get(key)
            if writer is None:
                # Empty/None keys use the bare tag; real keys are prefixed onto it.
                subtag = self.tag if key in ('', None) else f'{key}_{self.tag}'
                writer = CacheJsonlFile(self.parent_dir, self.prefix_stem, subtag, self.batch_size)
                self.files[key] = writer
            writer.append(items)

    def save_all(self):
        """ Finalize every group writer. """
        for writer in self.files.values():
            writer.save_all()
272
+
273
+
274
def process_single_file(root,
                        infile,
                        tag,
                        row_func,
                        *,
                        cur_idx=1,
                        total=None,
                        if_exists='skip',
                        group_func=None,
                        batch_size=2000,
                        debug=False,
                        show_tqdm=False):
    """ Process one line-oriented file with row_func, writing results to
    sibling '<root>_<tag>' / '<root>_<tag>error' directory trees.

    :param root: root directory the relative output layout is mirrored from.
    :param infile: input file (resolved relative to root via XlPath.init).
    :param tag: tag embedded in the output directory and file names.
    :param row_func: converts one input line to an output record; a falsy
        result, or a dict whose 'status' is not 'ok', is routed to the error file.
    :param cur_idx: 1-based index of this file, used only for progress output.
    :param total: total file count, used only for progress output.
    :param if_exists: 'skip' | 'overwrite' | 'error' — behavior when finished
        outputs already exist; any other value behaves like 'skip'.
    :param group_func: optional grouping key for good records (disabled in debug mode).
    :param batch_size: flush threshold for the streaming writers.
    :param debug: when True, disable grouping and write errors into the same
        writer as good records (so everything lands in one place for inspection).
    :param show_tqdm: show a per-line progress bar.
    """
    def finished():
        # Progress report; includes the file index when the caller supplies a total.
        if total:
            tprint(f'处理第{cur_idx}/{total}个文件:{infile.name} ==> {tag}')
        else:
            tprint(f'处理文件:{infile.name} ==> {tag}')

    # 1 output paths mirror infile's position relative to root
    srcdir = XlPath(root)
    infile = XlPath.init(infile, root=srcdir)
    relpath = infile.relative_to(srcdir)

    dstdir = srcdir.parent / f'{srcdir.name}_{tag}' / relpath.parent
    errdir = srcdir.parent / f'{srcdir.name}_{tag}error' / relpath.parent

    # 2 detect whether this file was already processed
    # stem = re.split(r'\d+_\d{14}', relpath.stem)[0]
    stem = relpath.stem + f'_{tag}'
    dstfiles = list(dstdir.glob_files(f'{stem}*.jsonl'))
    errfiles = list(errdir.glob_files(f'{stem}*.jsonl'))

    # Further filter: only finished outputs count, i.e. names ending in
    # '<count>_<14-digit timestamp>.jsonl' (optionally with 'error').
    def is_exists_old_file(f):
        if f.name.endswith('.cache.jsonl'):
            return False
        stem2 = f.name[len(stem):]
        if re.match(r'(error)?\d+_\d{14}.jsonl$', stem2):
            return True

    # In-progress .cache.jsonl files are ignored on purpose.
    check_files = [f for f in (dstfiles + errfiles) if is_exists_old_file(f)]

    if check_files:
        if if_exists == 'skip':
            finished()
            return
        elif if_exists == 'overwrite':
            for f in check_files:
                f.delete()
        elif if_exists == 'error':
            raise FileExistsError(f'目标文件已存在:{check_files}')
        else:
            # Unknown value: behave like 'skip'.
            finished()
            return

    # 3 process the data
    if debug:  # debug mode disables grouping
        group_func = None
    dstcgf = CacheJsonlGroupFiles(dstdir, infile.stem, tag, group_func, batch_size)
    errcgf = CacheJsonlGroupFiles(errdir, infile.stem, tag + 'error', batch_size=batch_size)
    if debug:  # debug mode: error records share the good-record writer
        errcgf = dstcgf

    for line in tqdm(infile.yield_line(), desc=f'{infile.name} ==> {tag}', disable=not show_tqdm):
        # todo failed records should carry an error message and be stored as jsonl too
        row = row_func(line)
        if row and (not isinstance(row, dict) or row.get('status', 'ok') == 'ok'):
            # Keep only non-empty results whose optional 'status' field is 'ok'.
            dstcgf.append([row])
        else:
            # Older versions stored the raw line; newer versions store row.
            # When row itself is empty we still fall back to the raw line.
            errcgf.append([row or line])

    dstcgf.save_all()
    errcgf.save_all()
    finished()
352
+
353
+
354
class StructureAnalyzer:
    """ Tools for summarizing the structural "signatures" of json-like records. """

    @classmethod
    def item_to_json(cls, x, depth):
        """ Get the structural signature of a value, recursing at most *depth* levels.

        Dicts map sorted keys to child signatures, lists map elements to child
        signatures, and everything else collapses to its type name.

        todo: after this project, this could be polished into a general
        structure-parsing utility.
        """
        if depth <= 0:
            return typename(x)

        if isinstance(x, dict):
            d = {}
            keys = sorted(x.keys())  # sorted so equal structures compare equal
            for k in keys:
                d[k] = cls.item_to_json(x[k], depth - 1)
        elif isinstance(x, list):
            d = []
            for k in x:
                d.append(cls.item_to_json(k, depth - 1))
        else:
            d = typename(x)

        return d

    @classmethod
    def item_to_str(cls, x, depth):
        """ Signature of *x* serialized to a json string (hashable group key). """
        res = cls.item_to_json(x, depth)
        return json.dumps(res)

    @classmethod
    def group_items(cls, items, depth):
        """ Group items by their depth-limited signature, most common group first. """
        ct = Counter()
        groups = defaultdict(list)
        for x in items:
            desc = cls.item_to_str(x, depth)
            ct[desc] += 1
            groups[desc].append(x)
        # Sort groups by descending member count.
        groups = dict(sorted(groups.items(), key=lambda x: -len(x[1])))
        return groups

    @classmethod
    def get_items_structures(cls, items, savefile=None):
        """ Get the structure distribution of jsonl data.

        :param list[json] items: a batch of json records
        :param savefile: optional jsonl path the result is written to
        :return list[json]: counts per structure, shown as a tree ordered
            from most to least common
        """

        def add_group(parent, items, depth, res):
            # Recursively split items at increasing depth; 'depth' labels like
            # '1-2-1' encode the path in the tree.
            tag = parent['depth'] if parent else ''
            groups = cls.group_items(items, depth)
            for i, (desc, items2) in enumerate(groups.items(), start=1):
                if desc == parent['desc']:  # refining gave the same structure: stop deepening
                    continue

                d = {}
                d['depth'] = f'{tag}-{i}' if tag else f'{i}'
                d['count'] = len(items2)
                d['desc'] = desc
                d['structure'] = cls.item_to_json(items2[0], depth)
                res.append(d)

                add_group(d, items2, depth + 1, res)

        res = []  # initialize result list
        add_group({'depth': '', 'desc': ''}, items, 1, res)  # recurse from the empty root label

        if savefile:
            XlPath(savefile).write_jsonl(res)

        return res

    @classmethod
    def compare_keys(cls, items):
        """ For data from get_items_structures, compare key coverage across
        top-level structures as a transposed DataFrame. """
        import pandas as pd
        from pyxllib.algo.stat import custom_fillna

        ls = []
        for x in items:
            row = {}
            row['__depth'] = x['depth']
            row['__count'] = x['count']
            if isinstance(x['structure'], dict):
                row.update(x['structure'])
            elif isinstance(x['structure'], list):
                # List structures get positional pseudo-keys like '[0]', '[1]'.
                row.update({f'[{k}]': v for k, v in enumerate(x['structure'])})
            ls.append(row)

        df = pd.DataFrame.from_dict(ls)
        # Keep only top-level rows (depth labels without '-').
        df = df[df['__depth'].apply(lambda x: '-' not in x)]
        df = custom_fillna(df).T

        return df
450
+
451
+
452
def process_batch_files(srcdir,
                        dsttag,
                        line_convert_func,
                        pattern='*.jsonl',
                        if_exists='skip',
                        processes_num=1,
                        group_func=None,
                        batch_size=2000,
                        debug=False,
                        ):
    """ Generic batch-processing driver over a file or directory tree.

    :param srcdir: input directory (or a single file) to process
    :param pattern: file-name filter, e.g. '*', '*.jsonl', '*_std*.jsonl'
    :param if_exists: what to do when outputs already exist:
        'skip' to not reprocess, 'overwrite' to redo, 'error' to raise
    :param processes_num: number of worker processes
    :param batch_size: entries buffered per file before an intermediate write
    """
    # 1 collect the files to process
    srcdir = XlPath(srcdir)
    if srcdir.is_file():
        files = [srcdir.name]
        srcdir = srcdir.parent
    else:
        files = [f.relpath(srcdir) for f in srcdir.rglob_files(pattern)]

    file_num = len(files)

    # 2 fan out over the files, possibly in parallel
    backend = 'loky' if processes_num > 1 else 'sequential'
    tasks = [
        delayed(process_single_file)(srcdir,
                                     f,
                                     dsttag,
                                     line_convert_func,
                                     cur_idx=i,
                                     total=file_num,
                                     if_exists=if_exists,
                                     group_func=group_func,
                                     batch_size=batch_size,
                                     debug=debug,
                                     # per-line tqdm only makes sense single-process
                                     show_tqdm=processes_num == 1)
        for i, f in enumerate(files, start=1)
    ]

    Parallel(n_jobs=processes_num, backend=backend)(tasks)
@@ -1,76 +1,76 @@
1
- #!/usr/bin/env python3
2
- # -*- coding: utf-8 -*-
3
- # @Author : 陈坤泽
4
- # @Email : 877362867@qq.com
5
- # @Date : 2023/03/28
6
-
7
- import time
8
-
9
- from selenium import webdriver
10
- from selenium.webdriver.common.by import By
11
- from selenium.webdriver.support.ui import WebDriverWait
12
- from selenium.webdriver.support import expected_conditions as EC
13
- from selenium.common.exceptions import ElementClickInterceptedException, NoSuchWindowException
14
-
15
-
16
- class element_has_text(object):
17
- def __init__(self, locator, text):
18
- self.locator = locator
19
- self.text = text
20
-
21
- def __call__(self, driver):
22
- element = driver.find_element(*self.locator)
23
- if self.text in element.text:
24
- return element
25
- else:
26
- return False
27
-
28
-
29
- class XlChrome(webdriver.Chrome):
30
- def __init__(self, *args, **kwargs):
31
- super().__init__(*args, **kwargs)
32
- self.maximize_window()
33
-
34
- def locate(self, locator, seconds=10):
35
- """ 定位一个元素 """
36
- if isinstance(locator, str): # 默认是XPATH格式
37
- locator = (By.XPATH, locator)
38
- return WebDriverWait(self, seconds).until(EC.presence_of_element_located(locator))
39
-
40
- def click(self, locator, seconds=10, check=True):
41
- """ 点击一个元素 """
42
- if isinstance(locator, str):
43
- locator = (By.XPATH, locator)
44
- if check:
45
- element = WebDriverWait(self, seconds).until(EC.element_to_be_clickable(locator))
46
- else:
47
- element = self.locate(locator, seconds)
48
- time.sleep(0.5) # 最好稍微等一下再点击
49
- try:
50
- element.click()
51
- except ElementClickInterceptedException:
52
- # 特殊情况,例如小鹅通下载页面的"下载"按钮没法正常click,要用js脚本去click
53
- self.execute_script("arguments[0].click();", element)
54
-
55
- def locate_text(self, locator, text, seconds=10):
56
- """ 判断指定元素位置是否含有指定文本 """
57
- if isinstance(locator, str):
58
- locator = (By.XPATH, locator)
59
- return WebDriverWait(self, seconds).until(element_has_text(locator, text))
60
-
61
- def __bool__(self):
62
- """ 判断driver是否还存在,如果已被手动关闭,这个值会返回False """
63
- try:
64
- self.title
65
- return True
66
- except NoSuchWindowException:
67
- return False
68
-
69
-
70
- def get_global_driver(_driver_store=[None]): # trick
71
- """ 通过这个接口可以固定一个driver来使用 """
72
- if _driver_store[0] is None:
73
- _driver_store[0] = XlChrome()
74
- if not _driver_store[0]: # 如果驱动没了,重新启动
75
- _driver_store[0] = XlChrome()
76
- return _driver_store[0]
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # @Author : 陈坤泽
4
+ # @Email : 877362867@qq.com
5
+ # @Date : 2023/03/28
6
+
7
+ import time
8
+
9
+ from selenium import webdriver
10
+ from selenium.webdriver.common.by import By
11
+ from selenium.webdriver.support.ui import WebDriverWait
12
+ from selenium.webdriver.support import expected_conditions as EC
13
+ from selenium.common.exceptions import ElementClickInterceptedException, NoSuchWindowException
14
+
15
+
16
+ class element_has_text(object):
17
+ def __init__(self, locator, text):
18
+ self.locator = locator
19
+ self.text = text
20
+
21
+ def __call__(self, driver):
22
+ element = driver.find_element(*self.locator)
23
+ if self.text in element.text:
24
+ return element
25
+ else:
26
+ return False
27
+
28
+
29
+ class XlChrome(webdriver.Chrome):
30
+ def __init__(self, *args, **kwargs):
31
+ super().__init__(*args, **kwargs)
32
+ self.maximize_window()
33
+
34
+ def locate(self, locator, seconds=10):
35
+ """ 定位一个元素 """
36
+ if isinstance(locator, str): # 默认是XPATH格式
37
+ locator = (By.XPATH, locator)
38
+ return WebDriverWait(self, seconds).until(EC.presence_of_element_located(locator))
39
+
40
+ def click(self, locator, seconds=10, check=True):
41
+ """ 点击一个元素 """
42
+ if isinstance(locator, str):
43
+ locator = (By.XPATH, locator)
44
+ if check:
45
+ element = WebDriverWait(self, seconds).until(EC.element_to_be_clickable(locator))
46
+ else:
47
+ element = self.locate(locator, seconds)
48
+ time.sleep(0.5) # 最好稍微等一下再点击
49
+ try:
50
+ element.click()
51
+ except ElementClickInterceptedException:
52
+ # 特殊情况,例如小鹅通下载页面的"下载"按钮没法正常click,要用js脚本去click
53
+ self.execute_script("arguments[0].click();", element)
54
+
55
+ def locate_text(self, locator, text, seconds=10):
56
+ """ 判断指定元素位置是否含有指定文本 """
57
+ if isinstance(locator, str):
58
+ locator = (By.XPATH, locator)
59
+ return WebDriverWait(self, seconds).until(element_has_text(locator, text))
60
+
61
+ def __bool__(self):
62
+ """ 判断driver是否还存在,如果已被手动关闭,这个值会返回False """
63
+ try:
64
+ self.title
65
+ return True
66
+ except NoSuchWindowException:
67
+ return False
68
+
69
+
70
+ def get_global_driver(_driver_store=[None]): # trick
71
+ """ 通过这个接口可以固定一个driver来使用 """
72
+ if _driver_store[0] is None:
73
+ _driver_store[0] = XlChrome()
74
+ if not _driver_store[0]: # 如果驱动没了,重新启动
75
+ _driver_store[0] = XlChrome()
76
+ return _driver_store[0]