pyxllib 0.3.96__py3-none-any.whl → 0.3.200__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (358) hide show
  1. pyxllib/__init__.py +21 -21
  2. pyxllib/algo/__init__.py +8 -8
  3. pyxllib/algo/disjoint.py +54 -54
  4. pyxllib/algo/geo.py +541 -529
  5. pyxllib/algo/intervals.py +964 -964
  6. pyxllib/algo/matcher.py +389 -311
  7. pyxllib/algo/newbie.py +166 -166
  8. pyxllib/algo/pupil.py +629 -461
  9. pyxllib/algo/shapelylib.py +67 -67
  10. pyxllib/algo/specialist.py +241 -240
  11. pyxllib/algo/stat.py +494 -458
  12. pyxllib/algo/treelib.py +149 -149
  13. pyxllib/algo/unitlib.py +66 -66
  14. {pyxlpr → pyxllib/autogui}/__init__.py +5 -5
  15. pyxllib/autogui/activewin.py +246 -0
  16. pyxllib/autogui/all.py +9 -0
  17. pyxllib/{ext/autogui → autogui}/autogui.py +852 -823
  18. pyxllib/autogui/uiautolib.py +362 -0
  19. pyxllib/{ext/autogui → autogui}/virtualkey.py +102 -102
  20. pyxllib/autogui/wechat.py +827 -0
  21. pyxllib/autogui/wechat_msg.py +421 -0
  22. pyxllib/autogui/wxautolib.py +84 -0
  23. pyxllib/cv/__init__.py +5 -5
  24. pyxllib/cv/expert.py +267 -267
  25. pyxllib/cv/imfile.py +159 -159
  26. pyxllib/cv/imhash.py +39 -39
  27. pyxllib/cv/pupil.py +9 -9
  28. pyxllib/cv/rgbfmt.py +1525 -1525
  29. pyxllib/cv/slidercaptcha.py +137 -0
  30. pyxllib/cv/trackbartools.py +251 -251
  31. pyxllib/cv/xlcvlib.py +1040 -1040
  32. pyxllib/cv/xlpillib.py +423 -423
  33. pyxllib/data/echarts.py +240 -129
  34. pyxllib/data/jsonlib.py +89 -0
  35. pyxllib/data/oss.py +72 -72
  36. pyxllib/data/pglib.py +1127 -643
  37. pyxllib/data/sqlite.py +568 -341
  38. pyxllib/data/sqllib.py +297 -297
  39. pyxllib/ext/JLineViewer.py +505 -492
  40. pyxllib/ext/__init__.py +6 -6
  41. pyxllib/ext/demolib.py +246 -246
  42. pyxllib/ext/drissionlib.py +277 -0
  43. pyxllib/ext/kq5034lib.py +12 -1606
  44. pyxllib/ext/old.py +663 -663
  45. pyxllib/ext/qt.py +449 -449
  46. pyxllib/ext/robustprocfile.py +497 -0
  47. pyxllib/ext/seleniumlib.py +76 -76
  48. pyxllib/ext/tk.py +173 -173
  49. pyxllib/ext/unixlib.py +827 -826
  50. pyxllib/ext/utools.py +351 -338
  51. pyxllib/ext/webhook.py +124 -101
  52. pyxllib/ext/win32lib.py +40 -40
  53. pyxllib/ext/wjxlib.py +88 -0
  54. pyxllib/ext/wpsapi.py +124 -0
  55. pyxllib/ext/xlwork.py +9 -0
  56. pyxllib/ext/yuquelib.py +1105 -173
  57. pyxllib/file/__init__.py +17 -17
  58. pyxllib/file/docxlib.py +761 -761
  59. pyxllib/file/gitlib.py +309 -309
  60. pyxllib/file/libreoffice.py +165 -0
  61. pyxllib/file/movielib.py +148 -139
  62. pyxllib/file/newbie.py +10 -10
  63. pyxllib/file/onenotelib.py +1469 -1469
  64. pyxllib/file/packlib/__init__.py +330 -293
  65. pyxllib/file/packlib/zipfile.py +2441 -2441
  66. pyxllib/file/pdflib.py +426 -426
  67. pyxllib/file/pupil.py +185 -185
  68. pyxllib/file/specialist/__init__.py +685 -685
  69. pyxllib/file/specialist/dirlib.py +799 -799
  70. pyxllib/file/specialist/download.py +193 -186
  71. pyxllib/file/specialist/filelib.py +2829 -2618
  72. pyxllib/file/xlsxlib.py +3131 -2976
  73. pyxllib/file/xlsyncfile.py +341 -0
  74. pyxllib/prog/__init__.py +5 -5
  75. pyxllib/prog/cachetools.py +64 -0
  76. pyxllib/prog/deprecatedlib.py +233 -233
  77. pyxllib/prog/filelock.py +42 -0
  78. pyxllib/prog/ipyexec.py +253 -253
  79. pyxllib/prog/multiprogs.py +940 -0
  80. pyxllib/prog/newbie.py +451 -444
  81. pyxllib/prog/pupil.py +1197 -1128
  82. pyxllib/prog/sitepackages.py +33 -33
  83. pyxllib/prog/specialist/__init__.py +391 -217
  84. pyxllib/prog/specialist/bc.py +203 -200
  85. pyxllib/prog/specialist/browser.py +497 -488
  86. pyxllib/prog/specialist/common.py +347 -347
  87. pyxllib/prog/specialist/datetime.py +199 -131
  88. pyxllib/prog/specialist/tictoc.py +240 -241
  89. pyxllib/prog/specialist/xllog.py +180 -180
  90. pyxllib/prog/xlosenv.py +108 -101
  91. pyxllib/stdlib/__init__.py +17 -17
  92. pyxllib/stdlib/tablepyxl/__init__.py +10 -10
  93. pyxllib/stdlib/tablepyxl/style.py +303 -303
  94. pyxllib/stdlib/tablepyxl/tablepyxl.py +130 -130
  95. pyxllib/text/__init__.py +8 -8
  96. pyxllib/text/ahocorasick.py +39 -39
  97. pyxllib/text/airscript.js +744 -0
  98. pyxllib/text/charclasslib.py +121 -109
  99. pyxllib/text/jiebalib.py +267 -264
  100. pyxllib/text/jinjalib.py +32 -0
  101. pyxllib/text/jsa_ai_prompt.md +271 -0
  102. pyxllib/text/jscode.py +922 -767
  103. pyxllib/text/latex/__init__.py +158 -158
  104. pyxllib/text/levenshtein.py +303 -303
  105. pyxllib/text/nestenv.py +1215 -1215
  106. pyxllib/text/newbie.py +300 -288
  107. pyxllib/text/pupil/__init__.py +8 -8
  108. pyxllib/text/pupil/common.py +1121 -1095
  109. pyxllib/text/pupil/xlalign.py +326 -326
  110. pyxllib/text/pycode.py +47 -47
  111. pyxllib/text/specialist/__init__.py +8 -8
  112. pyxllib/text/specialist/common.py +112 -112
  113. pyxllib/text/specialist/ptag.py +186 -186
  114. pyxllib/text/spellchecker.py +172 -172
  115. pyxllib/text/templates/echart_base.html +11 -0
  116. pyxllib/text/templates/highlight_code.html +17 -0
  117. pyxllib/text/templates/latex_editor.html +103 -0
  118. pyxllib/text/vbacode.py +17 -17
  119. pyxllib/text/xmllib.py +747 -685
  120. pyxllib/xl.py +42 -38
  121. pyxllib/xlcv.py +17 -17
  122. pyxllib-0.3.200.dist-info/METADATA +48 -0
  123. pyxllib-0.3.200.dist-info/RECORD +126 -0
  124. {pyxllib-0.3.96.dist-info → pyxllib-0.3.200.dist-info}/WHEEL +1 -2
  125. {pyxllib-0.3.96.dist-info → pyxllib-0.3.200.dist-info/licenses}/LICENSE +190 -190
  126. pyxllib/ext/autogui/__init__.py +0 -8
  127. pyxllib-0.3.96.dist-info/METADATA +0 -51
  128. pyxllib-0.3.96.dist-info/RECORD +0 -333
  129. pyxllib-0.3.96.dist-info/top_level.txt +0 -2
  130. pyxlpr/ai/__init__.py +0 -5
  131. pyxlpr/ai/clientlib.py +0 -1281
  132. pyxlpr/ai/specialist.py +0 -286
  133. pyxlpr/ai/torch_app.py +0 -172
  134. pyxlpr/ai/xlpaddle.py +0 -655
  135. pyxlpr/ai/xltorch.py +0 -705
  136. pyxlpr/data/__init__.py +0 -11
  137. pyxlpr/data/coco.py +0 -1325
  138. pyxlpr/data/datacls.py +0 -365
  139. pyxlpr/data/datasets.py +0 -200
  140. pyxlpr/data/gptlib.py +0 -1291
  141. pyxlpr/data/icdar/__init__.py +0 -96
  142. pyxlpr/data/icdar/deteval.py +0 -377
  143. pyxlpr/data/icdar/icdar2013.py +0 -341
  144. pyxlpr/data/icdar/iou.py +0 -340
  145. pyxlpr/data/icdar/rrc_evaluation_funcs_1_1.py +0 -463
  146. pyxlpr/data/imtextline.py +0 -473
  147. pyxlpr/data/labelme.py +0 -866
  148. pyxlpr/data/removeline.py +0 -179
  149. pyxlpr/data/specialist.py +0 -57
  150. pyxlpr/eval/__init__.py +0 -85
  151. pyxlpr/paddleocr.py +0 -776
  152. pyxlpr/ppocr/__init__.py +0 -15
  153. pyxlpr/ppocr/configs/rec/multi_language/generate_multi_language_configs.py +0 -226
  154. pyxlpr/ppocr/data/__init__.py +0 -135
  155. pyxlpr/ppocr/data/imaug/ColorJitter.py +0 -26
  156. pyxlpr/ppocr/data/imaug/__init__.py +0 -67
  157. pyxlpr/ppocr/data/imaug/copy_paste.py +0 -170
  158. pyxlpr/ppocr/data/imaug/east_process.py +0 -437
  159. pyxlpr/ppocr/data/imaug/gen_table_mask.py +0 -244
  160. pyxlpr/ppocr/data/imaug/iaa_augment.py +0 -114
  161. pyxlpr/ppocr/data/imaug/label_ops.py +0 -789
  162. pyxlpr/ppocr/data/imaug/make_border_map.py +0 -184
  163. pyxlpr/ppocr/data/imaug/make_pse_gt.py +0 -106
  164. pyxlpr/ppocr/data/imaug/make_shrink_map.py +0 -126
  165. pyxlpr/ppocr/data/imaug/operators.py +0 -433
  166. pyxlpr/ppocr/data/imaug/pg_process.py +0 -906
  167. pyxlpr/ppocr/data/imaug/randaugment.py +0 -143
  168. pyxlpr/ppocr/data/imaug/random_crop_data.py +0 -239
  169. pyxlpr/ppocr/data/imaug/rec_img_aug.py +0 -533
  170. pyxlpr/ppocr/data/imaug/sast_process.py +0 -777
  171. pyxlpr/ppocr/data/imaug/text_image_aug/__init__.py +0 -17
  172. pyxlpr/ppocr/data/imaug/text_image_aug/augment.py +0 -120
  173. pyxlpr/ppocr/data/imaug/text_image_aug/warp_mls.py +0 -168
  174. pyxlpr/ppocr/data/lmdb_dataset.py +0 -115
  175. pyxlpr/ppocr/data/pgnet_dataset.py +0 -104
  176. pyxlpr/ppocr/data/pubtab_dataset.py +0 -107
  177. pyxlpr/ppocr/data/simple_dataset.py +0 -372
  178. pyxlpr/ppocr/losses/__init__.py +0 -61
  179. pyxlpr/ppocr/losses/ace_loss.py +0 -52
  180. pyxlpr/ppocr/losses/basic_loss.py +0 -135
  181. pyxlpr/ppocr/losses/center_loss.py +0 -88
  182. pyxlpr/ppocr/losses/cls_loss.py +0 -30
  183. pyxlpr/ppocr/losses/combined_loss.py +0 -67
  184. pyxlpr/ppocr/losses/det_basic_loss.py +0 -208
  185. pyxlpr/ppocr/losses/det_db_loss.py +0 -80
  186. pyxlpr/ppocr/losses/det_east_loss.py +0 -63
  187. pyxlpr/ppocr/losses/det_pse_loss.py +0 -149
  188. pyxlpr/ppocr/losses/det_sast_loss.py +0 -121
  189. pyxlpr/ppocr/losses/distillation_loss.py +0 -272
  190. pyxlpr/ppocr/losses/e2e_pg_loss.py +0 -140
  191. pyxlpr/ppocr/losses/kie_sdmgr_loss.py +0 -113
  192. pyxlpr/ppocr/losses/rec_aster_loss.py +0 -99
  193. pyxlpr/ppocr/losses/rec_att_loss.py +0 -39
  194. pyxlpr/ppocr/losses/rec_ctc_loss.py +0 -44
  195. pyxlpr/ppocr/losses/rec_enhanced_ctc_loss.py +0 -70
  196. pyxlpr/ppocr/losses/rec_nrtr_loss.py +0 -30
  197. pyxlpr/ppocr/losses/rec_sar_loss.py +0 -28
  198. pyxlpr/ppocr/losses/rec_srn_loss.py +0 -47
  199. pyxlpr/ppocr/losses/table_att_loss.py +0 -109
  200. pyxlpr/ppocr/metrics/__init__.py +0 -44
  201. pyxlpr/ppocr/metrics/cls_metric.py +0 -45
  202. pyxlpr/ppocr/metrics/det_metric.py +0 -82
  203. pyxlpr/ppocr/metrics/distillation_metric.py +0 -73
  204. pyxlpr/ppocr/metrics/e2e_metric.py +0 -86
  205. pyxlpr/ppocr/metrics/eval_det_iou.py +0 -274
  206. pyxlpr/ppocr/metrics/kie_metric.py +0 -70
  207. pyxlpr/ppocr/metrics/rec_metric.py +0 -75
  208. pyxlpr/ppocr/metrics/table_metric.py +0 -50
  209. pyxlpr/ppocr/modeling/architectures/__init__.py +0 -32
  210. pyxlpr/ppocr/modeling/architectures/base_model.py +0 -88
  211. pyxlpr/ppocr/modeling/architectures/distillation_model.py +0 -60
  212. pyxlpr/ppocr/modeling/backbones/__init__.py +0 -54
  213. pyxlpr/ppocr/modeling/backbones/det_mobilenet_v3.py +0 -268
  214. pyxlpr/ppocr/modeling/backbones/det_resnet_vd.py +0 -246
  215. pyxlpr/ppocr/modeling/backbones/det_resnet_vd_sast.py +0 -285
  216. pyxlpr/ppocr/modeling/backbones/e2e_resnet_vd_pg.py +0 -265
  217. pyxlpr/ppocr/modeling/backbones/kie_unet_sdmgr.py +0 -186
  218. pyxlpr/ppocr/modeling/backbones/rec_mobilenet_v3.py +0 -138
  219. pyxlpr/ppocr/modeling/backbones/rec_mv1_enhance.py +0 -258
  220. pyxlpr/ppocr/modeling/backbones/rec_nrtr_mtb.py +0 -48
  221. pyxlpr/ppocr/modeling/backbones/rec_resnet_31.py +0 -210
  222. pyxlpr/ppocr/modeling/backbones/rec_resnet_aster.py +0 -143
  223. pyxlpr/ppocr/modeling/backbones/rec_resnet_fpn.py +0 -307
  224. pyxlpr/ppocr/modeling/backbones/rec_resnet_vd.py +0 -286
  225. pyxlpr/ppocr/modeling/heads/__init__.py +0 -54
  226. pyxlpr/ppocr/modeling/heads/cls_head.py +0 -52
  227. pyxlpr/ppocr/modeling/heads/det_db_head.py +0 -118
  228. pyxlpr/ppocr/modeling/heads/det_east_head.py +0 -121
  229. pyxlpr/ppocr/modeling/heads/det_pse_head.py +0 -37
  230. pyxlpr/ppocr/modeling/heads/det_sast_head.py +0 -128
  231. pyxlpr/ppocr/modeling/heads/e2e_pg_head.py +0 -253
  232. pyxlpr/ppocr/modeling/heads/kie_sdmgr_head.py +0 -206
  233. pyxlpr/ppocr/modeling/heads/multiheadAttention.py +0 -163
  234. pyxlpr/ppocr/modeling/heads/rec_aster_head.py +0 -393
  235. pyxlpr/ppocr/modeling/heads/rec_att_head.py +0 -202
  236. pyxlpr/ppocr/modeling/heads/rec_ctc_head.py +0 -88
  237. pyxlpr/ppocr/modeling/heads/rec_nrtr_head.py +0 -826
  238. pyxlpr/ppocr/modeling/heads/rec_sar_head.py +0 -402
  239. pyxlpr/ppocr/modeling/heads/rec_srn_head.py +0 -280
  240. pyxlpr/ppocr/modeling/heads/self_attention.py +0 -406
  241. pyxlpr/ppocr/modeling/heads/table_att_head.py +0 -246
  242. pyxlpr/ppocr/modeling/necks/__init__.py +0 -32
  243. pyxlpr/ppocr/modeling/necks/db_fpn.py +0 -111
  244. pyxlpr/ppocr/modeling/necks/east_fpn.py +0 -188
  245. pyxlpr/ppocr/modeling/necks/fpn.py +0 -138
  246. pyxlpr/ppocr/modeling/necks/pg_fpn.py +0 -314
  247. pyxlpr/ppocr/modeling/necks/rnn.py +0 -92
  248. pyxlpr/ppocr/modeling/necks/sast_fpn.py +0 -284
  249. pyxlpr/ppocr/modeling/necks/table_fpn.py +0 -110
  250. pyxlpr/ppocr/modeling/transforms/__init__.py +0 -28
  251. pyxlpr/ppocr/modeling/transforms/stn.py +0 -135
  252. pyxlpr/ppocr/modeling/transforms/tps.py +0 -308
  253. pyxlpr/ppocr/modeling/transforms/tps_spatial_transformer.py +0 -156
  254. pyxlpr/ppocr/optimizer/__init__.py +0 -61
  255. pyxlpr/ppocr/optimizer/learning_rate.py +0 -228
  256. pyxlpr/ppocr/optimizer/lr_scheduler.py +0 -49
  257. pyxlpr/ppocr/optimizer/optimizer.py +0 -160
  258. pyxlpr/ppocr/optimizer/regularizer.py +0 -52
  259. pyxlpr/ppocr/postprocess/__init__.py +0 -55
  260. pyxlpr/ppocr/postprocess/cls_postprocess.py +0 -33
  261. pyxlpr/ppocr/postprocess/db_postprocess.py +0 -234
  262. pyxlpr/ppocr/postprocess/east_postprocess.py +0 -143
  263. pyxlpr/ppocr/postprocess/locality_aware_nms.py +0 -200
  264. pyxlpr/ppocr/postprocess/pg_postprocess.py +0 -52
  265. pyxlpr/ppocr/postprocess/pse_postprocess/__init__.py +0 -15
  266. pyxlpr/ppocr/postprocess/pse_postprocess/pse/__init__.py +0 -29
  267. pyxlpr/ppocr/postprocess/pse_postprocess/pse/setup.py +0 -14
  268. pyxlpr/ppocr/postprocess/pse_postprocess/pse_postprocess.py +0 -118
  269. pyxlpr/ppocr/postprocess/rec_postprocess.py +0 -654
  270. pyxlpr/ppocr/postprocess/sast_postprocess.py +0 -355
  271. pyxlpr/ppocr/tools/__init__.py +0 -14
  272. pyxlpr/ppocr/tools/eval.py +0 -83
  273. pyxlpr/ppocr/tools/export_center.py +0 -77
  274. pyxlpr/ppocr/tools/export_model.py +0 -129
  275. pyxlpr/ppocr/tools/infer/predict_cls.py +0 -151
  276. pyxlpr/ppocr/tools/infer/predict_det.py +0 -300
  277. pyxlpr/ppocr/tools/infer/predict_e2e.py +0 -169
  278. pyxlpr/ppocr/tools/infer/predict_rec.py +0 -414
  279. pyxlpr/ppocr/tools/infer/predict_system.py +0 -204
  280. pyxlpr/ppocr/tools/infer/utility.py +0 -629
  281. pyxlpr/ppocr/tools/infer_cls.py +0 -83
  282. pyxlpr/ppocr/tools/infer_det.py +0 -134
  283. pyxlpr/ppocr/tools/infer_e2e.py +0 -122
  284. pyxlpr/ppocr/tools/infer_kie.py +0 -153
  285. pyxlpr/ppocr/tools/infer_rec.py +0 -146
  286. pyxlpr/ppocr/tools/infer_table.py +0 -107
  287. pyxlpr/ppocr/tools/program.py +0 -596
  288. pyxlpr/ppocr/tools/test_hubserving.py +0 -117
  289. pyxlpr/ppocr/tools/train.py +0 -163
  290. pyxlpr/ppocr/tools/xlprog.py +0 -748
  291. pyxlpr/ppocr/utils/EN_symbol_dict.txt +0 -94
  292. pyxlpr/ppocr/utils/__init__.py +0 -24
  293. pyxlpr/ppocr/utils/dict/ar_dict.txt +0 -117
  294. pyxlpr/ppocr/utils/dict/arabic_dict.txt +0 -162
  295. pyxlpr/ppocr/utils/dict/be_dict.txt +0 -145
  296. pyxlpr/ppocr/utils/dict/bg_dict.txt +0 -140
  297. pyxlpr/ppocr/utils/dict/chinese_cht_dict.txt +0 -8421
  298. pyxlpr/ppocr/utils/dict/cyrillic_dict.txt +0 -163
  299. pyxlpr/ppocr/utils/dict/devanagari_dict.txt +0 -167
  300. pyxlpr/ppocr/utils/dict/en_dict.txt +0 -63
  301. pyxlpr/ppocr/utils/dict/fa_dict.txt +0 -136
  302. pyxlpr/ppocr/utils/dict/french_dict.txt +0 -136
  303. pyxlpr/ppocr/utils/dict/german_dict.txt +0 -143
  304. pyxlpr/ppocr/utils/dict/hi_dict.txt +0 -162
  305. pyxlpr/ppocr/utils/dict/it_dict.txt +0 -118
  306. pyxlpr/ppocr/utils/dict/japan_dict.txt +0 -4399
  307. pyxlpr/ppocr/utils/dict/ka_dict.txt +0 -153
  308. pyxlpr/ppocr/utils/dict/korean_dict.txt +0 -3688
  309. pyxlpr/ppocr/utils/dict/latin_dict.txt +0 -185
  310. pyxlpr/ppocr/utils/dict/mr_dict.txt +0 -153
  311. pyxlpr/ppocr/utils/dict/ne_dict.txt +0 -153
  312. pyxlpr/ppocr/utils/dict/oc_dict.txt +0 -96
  313. pyxlpr/ppocr/utils/dict/pu_dict.txt +0 -130
  314. pyxlpr/ppocr/utils/dict/rs_dict.txt +0 -91
  315. pyxlpr/ppocr/utils/dict/rsc_dict.txt +0 -134
  316. pyxlpr/ppocr/utils/dict/ru_dict.txt +0 -125
  317. pyxlpr/ppocr/utils/dict/ta_dict.txt +0 -128
  318. pyxlpr/ppocr/utils/dict/table_dict.txt +0 -277
  319. pyxlpr/ppocr/utils/dict/table_structure_dict.txt +0 -2759
  320. pyxlpr/ppocr/utils/dict/te_dict.txt +0 -151
  321. pyxlpr/ppocr/utils/dict/ug_dict.txt +0 -114
  322. pyxlpr/ppocr/utils/dict/uk_dict.txt +0 -142
  323. pyxlpr/ppocr/utils/dict/ur_dict.txt +0 -137
  324. pyxlpr/ppocr/utils/dict/xi_dict.txt +0 -110
  325. pyxlpr/ppocr/utils/dict90.txt +0 -90
  326. pyxlpr/ppocr/utils/e2e_metric/Deteval.py +0 -574
  327. pyxlpr/ppocr/utils/e2e_metric/polygon_fast.py +0 -83
  328. pyxlpr/ppocr/utils/e2e_utils/extract_batchsize.py +0 -87
  329. pyxlpr/ppocr/utils/e2e_utils/extract_textpoint_fast.py +0 -457
  330. pyxlpr/ppocr/utils/e2e_utils/extract_textpoint_slow.py +0 -592
  331. pyxlpr/ppocr/utils/e2e_utils/pgnet_pp_utils.py +0 -162
  332. pyxlpr/ppocr/utils/e2e_utils/visual.py +0 -162
  333. pyxlpr/ppocr/utils/en_dict.txt +0 -95
  334. pyxlpr/ppocr/utils/gen_label.py +0 -81
  335. pyxlpr/ppocr/utils/ic15_dict.txt +0 -36
  336. pyxlpr/ppocr/utils/iou.py +0 -54
  337. pyxlpr/ppocr/utils/logging.py +0 -69
  338. pyxlpr/ppocr/utils/network.py +0 -84
  339. pyxlpr/ppocr/utils/ppocr_keys_v1.txt +0 -6623
  340. pyxlpr/ppocr/utils/profiler.py +0 -110
  341. pyxlpr/ppocr/utils/save_load.py +0 -150
  342. pyxlpr/ppocr/utils/stats.py +0 -72
  343. pyxlpr/ppocr/utils/utility.py +0 -80
  344. pyxlpr/ppstructure/__init__.py +0 -13
  345. pyxlpr/ppstructure/predict_system.py +0 -187
  346. pyxlpr/ppstructure/table/__init__.py +0 -13
  347. pyxlpr/ppstructure/table/eval_table.py +0 -72
  348. pyxlpr/ppstructure/table/matcher.py +0 -192
  349. pyxlpr/ppstructure/table/predict_structure.py +0 -136
  350. pyxlpr/ppstructure/table/predict_table.py +0 -221
  351. pyxlpr/ppstructure/table/table_metric/__init__.py +0 -16
  352. pyxlpr/ppstructure/table/table_metric/parallel.py +0 -51
  353. pyxlpr/ppstructure/table/table_metric/table_metric.py +0 -247
  354. pyxlpr/ppstructure/table/tablepyxl/__init__.py +0 -13
  355. pyxlpr/ppstructure/table/tablepyxl/style.py +0 -283
  356. pyxlpr/ppstructure/table/tablepyxl/tablepyxl.py +0 -118
  357. pyxlpr/ppstructure/utility.py +0 -71
  358. pyxlpr/xlai.py +0 -10
@@ -1,347 +1,347 @@
1
- #!/usr/bin/env python3
2
- # -*- coding: utf-8 -*-
3
- # @Author : 陈坤泽
4
- # @Email : 877362867@qq.com
5
- # @Date : 2020/06/02 11:09
6
-
7
- from collections import defaultdict, Counter
8
- import copy
9
- import re
10
- import sys
11
-
12
- import pandas as pd
13
- from more_itertools import unique_everseen
14
-
15
- from pyxllib.prog.newbie import typename
16
- from pyxllib.algo.pupil import natural_sort_key
17
- from pyxllib.text.pupil import shorten, east_asian_shorten
18
-
19
-
20
- def dataframe_str(df, *args, ambiguous_as_wide=None, shorten=True):
21
- """输出DataFrame
22
- DataFrame可以直接输出的,这里是增加了对中文字符的对齐效果支持
23
-
24
- :param df: DataFrame数据结构
25
- :param args: option_context格式控制
26
- :param ambiguous_as_wide: 是否对①②③这种域宽有歧义的设为宽字符
27
- win32平台上和linux上①域宽不同,默认win32是域宽2,linux是域宽1
28
- :param shorten: 是否对每个元素提前进行字符串化并控制长度在display.max_colwidth以内
29
- 因为pandas的字符串截取遇到中文是有问题的,可以用我自定义的函数先做截取
30
- 默认开启,不过这步比较消耗时间
31
-
32
- >> df = pd.DataFrame({'哈哈': ['a'*100, '哈\n①'*10, 'a哈'*100]})
33
- 哈哈
34
- 0 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...
35
- 1 哈 ①哈 ①哈 ①哈 ①哈 ①哈 ①哈 ①哈 ①哈 ①...
36
- 2 a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a...
37
- """
38
- import pandas as pd
39
-
40
- if ambiguous_as_wide is None:
41
- ambiguous_as_wide = sys.platform == 'win32'
42
- with pd.option_context('display.unicode.east_asian_width', True, # 中文输出必备选项,用来控制正确的域宽
43
- 'display.unicode.ambiguous_as_wide', ambiguous_as_wide,
44
- 'max_columns', 20, # 最大列数设置到20列
45
- 'display.width', 200, # 最大宽度设置到200
46
- *args):
47
- if shorten: # applymap可以对所有的元素进行映射处理,并返回一个新的df
48
- df = df.applymap(lambda x: east_asian_shorten(str(x), pd.options.display.max_colwidth))
49
- s = str(df)
50
- return s
51
-
52
-
53
- class TypeConvert:
54
- @classmethod
55
- def dict2list(cls, d: dict, *, nsort=False):
56
- """ 字典转n*2的list
57
-
58
- :param d: 字典
59
- :param nsort:
60
- True: 对key使用自然排序
61
- False: 使用d默认的遍历顺序
62
- :return:
63
- """
64
- ls = list(d.items())
65
- if nsort:
66
- ls = sorted(ls, key=lambda x: natural_sort_key(str(x[0])))
67
- return ls
68
-
69
- @classmethod
70
- def dict2df(cls, d):
71
- """dict类型转DataFrame类型"""
72
- name = typename(d)
73
- if isinstance(d, Counter):
74
- li = d.most_common()
75
- else:
76
- li = cls.dict2list(d, nsort=True)
77
- return pd.DataFrame.from_records(li, columns=(f'{name}-key', f'{name}-value'))
78
-
79
- @classmethod
80
- def list2df(cls, li):
81
- if li and isinstance(li[0], (list, tuple)): # 有两维时按表格显示
82
- df = pd.DataFrame.from_records(li)
83
- else: # 只有一维时按一列显示
84
- df = pd.DataFrame(pd.Series(li), columns=(typename(li),))
85
- return df
86
-
87
- @classmethod
88
- def try2df(cls, arg):
89
- """尝试将各种不同的类型转成dataframe"""
90
- if isinstance(arg, dict):
91
- df = cls.dict2df(arg)
92
- elif isinstance(arg, (list, tuple)):
93
- df = cls.list2df(arg)
94
- elif isinstance(arg, pd.Series):
95
- df = pd.DataFrame(arg)
96
- else:
97
- df = arg
98
- return df
99
-
100
-
101
- class NestedDict:
102
- """ 字典嵌套结构相关功能
103
-
104
- TODO 感觉跟 pprint 的嵌套识别美化输出相关,可能有些代码是可以结合简化的~~
105
- """
106
-
107
- @classmethod
108
- def has_subdict(cls, data, include_self=True):
109
- """是否含有dict子结构
110
- :param include_self: 是否包含自身,即data本身是一个dict的话,也认为has_subdict是True
111
- """
112
- if include_self and isinstance(data, dict):
113
- return True
114
- elif isinstance(data, (list, tuple, set)):
115
- for v in data:
116
- if cls.has_subdict(v):
117
- return True
118
- return False
119
-
120
- @classmethod
121
- def to_html_table(cls, data, max_items=10):
122
- """ 以html表格套表格的形式,展示一个嵌套结构数据
123
-
124
- :param data: 数据
125
- :param max_items: 项目显示上限,有些数据项目太多了,要精简下
126
- 设为假值则不设上限
127
- :return:
128
-
129
- TODO 这个速度有点慢,怎么加速?
130
- """
131
-
132
- def tohtml(d):
133
- if max_items:
134
- df = TypeConvert.try2df(d)
135
- if len(df) > max_items:
136
- n = len(df)
137
- return df[:max_items].to_html(escape=False) + f'... {n - 1}'
138
- else:
139
- return df.to_html(escape=False)
140
- else:
141
- return TypeConvert.try2df(d).to_html(escape=False)
142
-
143
- if not cls.has_subdict(data):
144
- res = str(data)
145
- elif isinstance(data, dict):
146
- if isinstance(data, Counter):
147
- d = data
148
- else:
149
- d = dict()
150
- for k, v in data.items():
151
- if cls.has_subdict(v):
152
- v = cls.to_html_table(v, max_items=max_items)
153
- d[k] = v
154
- res = tohtml(d)
155
- else:
156
- li = [cls.to_html_table(x, max_items=max_items) for x in data]
157
- res = tohtml(li)
158
-
159
- return res.replace('\n', ' ')
160
-
161
-
162
- class KeyValuesCounter:
163
- """ 各种键值对出现次数的统计
164
- 会递归找子字典结构,但不存储结构信息,只记录纯粹的键值对信息
165
-
166
- 应用场景:对未知的json结构,批量读取后,显示所有键值对的出现情况
167
- """
168
-
169
- def __init__(self):
170
- self.kvs = defaultdict(Counter)
171
-
172
- def add(self, data, max_value_length=100):
173
- """
174
- :param max_value_length: 添加的值,进行截断,防止有些值太长
175
- """
176
- if not NestedDict.has_subdict(data):
177
- return
178
- elif isinstance(data, dict):
179
- for k, v in data.items():
180
- if NestedDict.has_subdict(v):
181
- self.add(v)
182
- else:
183
- self.kvs[k][shorten(str(v), max_value_length)] += 1
184
- else: # 否则 data 应该是个可迭代对象,才可能含有dict
185
- for x in data:
186
- self.add(x)
187
-
188
- def to_html_table(self, max_items=10):
189
- return NestedDict.to_html_table(self.kvs, max_items=max_items)
190
-
191
-
192
- class JsonStructParser:
193
- """ 类json数据格式的结构解析
194
-
195
- 【名称定义】
196
- item: 一条类json的数据条目
197
- path: 用类路径的格式,表达item中某个数值的索引。例如
198
- /a/b/3/c: 相当于 item['a']['b'][3]['c']
199
- 有一些特殊的path,例如容器类会以/结尾: /a/
200
- 以及一般会带上数值的类型标记,区分度更精确:/a/=dict
201
- pathx: 泛指下述中某种格式
202
- pathlist: list, 一条item对应的扁平化的路径
203
- pathstr/struct: paths拼接成一个str
204
- pathdict: paths重新组装成一个dict字典(未实装,太难写,性价比也低)
205
- """
206
-
207
- default_cfg = {'include_container': True, # 包含容器(dict、list)的路径
208
- 'value_type': True, # 是否带上后缀:数值的类型
209
- # 可以输入一个自定义的路径合并函数 path,type=merge_path(path,type)。
210
- # 一般是字典中出现不断变化的数值id,格式不统一,使用一定的规则,可以将path几种相近的冗余形式合并。
211
- # 也可以设True,默认会将数值类统一为0。
212
- 'merge_path': False,
213
- }
214
-
215
- @classmethod
216
- def _get_item_path_types(cls, item, prefix=''):
217
- """
218
- :param item: 类json结构的数据,可以含有类型: dict, list(tuple), int, str, bool, None
219
- 结点类型
220
- 其中 dict、list称为 container 容器类型
221
- 其他int、str称为数值类型
222
- 结构
223
- item 可以看成一个树形结构
224
- 其中数值类型可以视为叶子结点,其他容器类是非叶子结点
225
-
226
- 瑕疵
227
- 1、如果key本身带有"/",会导致混乱
228
- 2、list的下标转为0123,和字符串类型的key会混淆,和普通的字典key也会混淆
229
- """
230
- path_types = []
231
- if isinstance(item, dict):
232
- path_types.append([prefix + '/', 'dict'])
233
- for k in sorted(item.keys()): # 实验表明,在这里对字典的键进行排序就行,最后总的paths不要排序,不然结构会乱
234
- v = item[k]
235
- path_types.extend(cls._get_item_path_types(v, f'{prefix}/{k}'))
236
- elif isinstance(item, (list, tuple)):
237
- path_types.append([prefix + '/', type(item).__name__])
238
- for k, v in enumerate(item):
239
- path_types.extend(cls._get_item_path_types(v, f'{prefix}/{k}'))
240
- else:
241
- path_types.append([prefix, type(item).__name__])
242
- return path_types
243
-
244
- @classmethod
245
- def get_item_pathlist(cls, item, prefix='', **kwargs):
246
- """ 获得字典的结构标识
247
- """
248
- # 1 底层数据
249
- cfg = copy.copy(cls.default_cfg)
250
- cfg.update(kwargs)
251
- paths = cls._get_item_path_types(item, prefix)
252
-
253
- # 2 配置参数
254
- if cfg['merge_path']:
255
- if callable(cfg['merge_path']):
256
- func = cfg['merge_path']
257
- else:
258
- def func(p, t):
259
- return re.sub(r'\d+', '0', p), t
260
-
261
- # 保序去重
262
- paths = list(unique_everseen(map(lambda x: func(x[0], x[1]), paths)))
263
-
264
- if not cfg['include_container']:
265
- paths = [pt for pt in paths if (pt[0][-1] != '/')]
266
-
267
- if cfg['value_type']:
268
- paths = ['='.join(pt) for pt in paths]
269
- else:
270
- paths = [pt[0] for pt in paths]
271
-
272
- return paths
273
-
274
- @classmethod
275
- def get_item_pathstr(cls, item, prefix='', **kwargs):
276
- paths = cls.get_item_pathlist(item, prefix, **kwargs)
277
- return '\n'.join(paths)
278
-
279
- @classmethod
280
- def get_items_struct2cnt(cls, items, **kwargs):
281
- # 1 统计每种结构出现的次数
282
- struct2cnt = Counter()
283
- for item in items:
284
- pathstr = cls.get_item_pathstr(item, **kwargs)
285
- struct2cnt[pathstr] += 1
286
- # 2 按照从多到少排序
287
- struct2cnt = Counter(dict(sorted(struct2cnt.items(), key=lambda item: -item[1])))
288
- return struct2cnt
289
-
290
- @classmethod
291
- def get_items_structdf(cls, items, **kwargs):
292
- """ 分析一组题目里,出现了多少种不同的json结构 """
293
- # 1 获取原始数据,初始化
294
- struct2cnt = cls.get_items_struct2cnt(items, **kwargs)
295
- m = len(struct2cnt)
296
-
297
- # 2 path2cnt
298
- path2cnt = Counter()
299
- for struct in struct2cnt.keys():
300
- path2cnt.update({path: struct2cnt[struct] for path in struct.splitlines()})
301
- paths = sorted(path2cnt.keys(), key=lambda path: re.split(r'/=', path))
302
- path2cnt = {path: path2cnt[path] for path in paths}
303
-
304
- # 3 生成统计表
305
- ls = []
306
- columns = ['path', 'total'] + [f'struct{i}' for i in range(1, m + 1)]
307
- for path, cnt in path2cnt.items():
308
- row = [path, cnt]
309
- for struct, cnt in struct2cnt.items():
310
- t = cnt if path in struct else 0
311
- row.append(t)
312
- ls.append(row)
313
-
314
- df = pd.DataFrame.from_records(ls, columns=columns)
315
- return df
316
-
317
- @classmethod
318
- def get_itemgroups_structdf(cls, itemgroups, **kwargs):
319
- """ 分析不同套数据间的json结构区别
320
-
321
- 这里为了减少冗余开发,直接复用get_items_structdf
322
- 虽然会造成一些冗余功能,
323
- """
324
- # 1 统计所有paths出现情况
325
- n = len(itemgroups)
326
- d = dict()
327
- for i, gs in enumerate(itemgroups):
328
- for x in gs:
329
- paths = cls.get_item_pathlist(x, **kwargs)
330
- for p in paths:
331
- if p not in d:
332
- d[p] = [0] * n
333
- d[p][i] += 1
334
-
335
- # 排序
336
- paths = sorted(d.keys(), key=lambda path: re.split(r'/=', path))
337
-
338
- # 2 统计表
339
- ls = []
340
- columns = ['path', 'total'] + [f'group{i}' for i in range(1, n + 1)]
341
- for path in paths:
342
- vals = d[path]
343
- row = [path, sum(vals)] + vals
344
- ls.append(row)
345
-
346
- df = pd.DataFrame.from_records(ls, columns=columns)
347
- return df
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # @Author : 陈坤泽
4
+ # @Email : 877362867@qq.com
5
+ # @Date : 2020/06/02 11:09
6
+
7
+ from collections import defaultdict, Counter
8
+ import copy
9
+ import re
10
+ import sys
11
+
12
+ import pandas as pd
13
+ from more_itertools import unique_everseen
14
+
15
+ from pyxllib.prog.newbie import typename
16
+ from pyxllib.algo.pupil import natural_sort_key
17
+ from pyxllib.text.pupil import shorten, east_asian_shorten
18
+
19
+
20
+ def dataframe_str(df, *args, ambiguous_as_wide=None, shorten=True):
21
+ """输出DataFrame
22
+ DataFrame可以直接输出的,这里是增加了对中文字符的对齐效果支持
23
+
24
+ :param df: DataFrame数据结构
25
+ :param args: option_context格式控制
26
+ :param ambiguous_as_wide: 是否对①②③这种域宽有歧义的设为宽字符
27
+ win32平台上和linux上①域宽不同,默认win32是域宽2,linux是域宽1
28
+ :param shorten: 是否对每个元素提前进行字符串化并控制长度在display.max_colwidth以内
29
+ 因为pandas的字符串截取遇到中文是有问题的,可以用我自定义的函数先做截取
30
+ 默认开启,不过这步比较消耗时间
31
+
32
+ >> df = pd.DataFrame({'哈哈': ['a'*100, '哈\n①'*10, 'a哈'*100]})
33
+ 哈哈
34
+ 0 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...
35
+ 1 哈 ①哈 ①哈 ①哈 ①哈 ①哈 ①哈 ①哈 ①哈 ①...
36
+ 2 a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a哈a...
37
+ """
38
+ import pandas as pd
39
+
40
+ if ambiguous_as_wide is None:
41
+ ambiguous_as_wide = sys.platform == 'win32'
42
+ with pd.option_context('display.unicode.east_asian_width', True, # 中文输出必备选项,用来控制正确的域宽
43
+ 'display.unicode.ambiguous_as_wide', ambiguous_as_wide,
44
+ 'max_columns', 20, # 最大列数设置到20列
45
+ 'display.width', 200, # 最大宽度设置到200
46
+ *args):
47
+ if shorten: # applymap可以对所有的元素进行映射处理,并返回一个新的df
48
+ df = df.applymap(lambda x: east_asian_shorten(str(x), pd.options.display.max_colwidth))
49
+ s = str(df)
50
+ return s
51
+
52
+
53
+ class TypeConvert:
54
+ @classmethod
55
+ def dict2list(cls, d: dict, *, nsort=False):
56
+ """ 字典转n*2的list
57
+
58
+ :param d: 字典
59
+ :param nsort:
60
+ True: 对key使用自然排序
61
+ False: 使用d默认的遍历顺序
62
+ :return:
63
+ """
64
+ ls = list(d.items())
65
+ if nsort:
66
+ ls = sorted(ls, key=lambda x: natural_sort_key(str(x[0])))
67
+ return ls
68
+
69
+ @classmethod
70
+ def dict2df(cls, d):
71
+ """dict类型转DataFrame类型"""
72
+ name = typename(d)
73
+ if isinstance(d, Counter):
74
+ li = d.most_common()
75
+ else:
76
+ li = cls.dict2list(d, nsort=True)
77
+ return pd.DataFrame.from_records(li, columns=(f'{name}-key', f'{name}-value'))
78
+
79
+ @classmethod
80
+ def list2df(cls, li):
81
+ if li and isinstance(li[0], (list, tuple)): # 有两维时按表格显示
82
+ df = pd.DataFrame.from_records(li)
83
+ else: # 只有一维时按一列显示
84
+ df = pd.DataFrame(pd.Series(li), columns=(typename(li),))
85
+ return df
86
+
87
+ @classmethod
88
+ def try2df(cls, arg):
89
+ """尝试将各种不同的类型转成dataframe"""
90
+ if isinstance(arg, dict):
91
+ df = cls.dict2df(arg)
92
+ elif isinstance(arg, (list, tuple)):
93
+ df = cls.list2df(arg)
94
+ elif isinstance(arg, pd.Series):
95
+ df = pd.DataFrame(arg)
96
+ else:
97
+ df = arg
98
+ return df
99
+
100
+
101
class NestedDict:
    """ Helpers for nested dict structures.

    TODO feels related to pprint's nested pretty-printing; some of this
    code could probably be merged/simplified with it.
    """

    @classmethod
    def has_subdict(cls, data, include_self=True):
        """Whether the data contains a dict sub-structure.

        :param include_self: when True, data itself being a dict also
            counts as having a sub-dict
        """
        if include_self and isinstance(data, dict):
            return True
        if isinstance(data, (list, tuple, set)):
            return any(cls.has_subdict(member) for member in data)
        return False

    @classmethod
    def to_html_table(cls, data, max_items=10):
        """ Render a nested structure as html tables nested inside tables.

        :param data: the data to render
        :param max_items: cap on the rows shown per table (some data has
            far too many entries); a falsy value means no cap
        :return: html string (newlines collapsed to spaces)

        TODO this is a bit slow; how to speed it up?
        """

        def render(obj):
            df = TypeConvert.try2df(obj)
            if max_items and len(df) > max_items:
                total = len(df)
                return df[:max_items].to_html(escape=False) + f'... {total - 1}'
            return df.to_html(escape=False)

        if not cls.has_subdict(data):
            res = str(data)
        elif isinstance(data, dict):
            # Counter is reused directly (its values get replaced in place);
            # other dicts are rebuilt into a fresh dict
            d = data if isinstance(data, Counter) else dict()
            for k, v in data.items():
                d[k] = cls.to_html_table(v, max_items=max_items) if cls.has_subdict(v) else v
            res = render(d)
        else:
            res = render([cls.to_html_table(member, max_items=max_items) for member in data])

        return res.replace('\n', ' ')
160
+
161
+
162
class KeyValuesCounter:
    """ Count occurrences of key-value pairs.

    Recurses into nested dict structures, but stores no structural
    information — only the flat key-value occurrence counts.

    Use case: after batch-reading json data of unknown structure, show
    how every key-value pair is distributed.
    """

    def __init__(self):
        # key -> Counter of (stringified, truncated) values
        self.kvs = defaultdict(Counter)

    def add(self, data, max_value_length=100):
        """ Accumulate the key-value pairs found in *data*.

        :param max_value_length: values are stringified and truncated to
            this length before counting, so overly long values don't blow
            up the table
        """
        if not NestedDict.has_subdict(data):
            return
        elif isinstance(data, dict):
            for k, v in data.items():
                if NestedDict.has_subdict(v):
                    # bugfix: propagate the caller's truncation limit into
                    # the recursion (it was silently reset to 100 before)
                    self.add(v, max_value_length)
                else:
                    self.kvs[k][shorten(str(v), max_value_length)] += 1
        else:  # otherwise data must be an iterable to be able to hold dicts
            for x in data:
                self.add(x, max_value_length)

    def to_html_table(self, max_items=10):
        return NestedDict.to_html_table(self.kvs, max_items=max_items)
190
+
191
+
192
class JsonStructParser:
    """ Structure parser for json-like data.

    [Terminology]
    item: one json-like data record
    path: a path-style index of one value inside an item, e.g.
        /a/b/3/c  ==  item['a']['b'][3]['c']
        Container paths end with '/': /a/
        Paths usually carry the value's type as a suffix for better
        discrimination: /a/=dict
    pathx: any of the representations below
        pathlist: list, the flattened paths of one item
        pathstr/struct: the pathlist joined into one str
        pathdict: the paths reassembled into a dict (not implemented;
            too hard to write for too little benefit)
    """

    default_cfg = {'include_container': True,  # keep container (dict/list) paths
                   'value_type': True,  # append the value's type as a suffix
                   # merge_path may be a custom function path,type=merge_path(path,type).
                   # Typically dicts contain varying numeric ids; a merge rule can
                   # collapse several near-identical path forms into one.
                   # It may also be set to True: the default rule replaces digit runs with 0.
                   'merge_path': False,
                   }

    @classmethod
    def _get_item_path_types(cls, item, prefix=''):
        """ Flatten *item* into a list of [path, type_name] pairs.

        :param item: json-like data, may contain: dict, list(tuple), int, str, bool, None
            node types:
                dict and list are "container" nodes
                everything else (int, str, ...) is a leaf value
            structure:
                the item can be viewed as a tree; leaf values are the
                leaves, containers are the inner nodes

        Known caveats:
            1. a key containing '/' garbles the paths
            2. list indices become 0,1,2,... and are indistinguishable
               from string keys of the same spelling
        """
        path_types = []
        if isinstance(item, dict):
            path_types.append([prefix + '/', 'dict'])
            # sorting keys here is enough; do NOT sort the final path list,
            # or the structural ordering gets destroyed
            for k in sorted(item.keys()):
                path_types.extend(cls._get_item_path_types(item[k], f'{prefix}/{k}'))
        elif isinstance(item, (list, tuple)):
            path_types.append([prefix + '/', type(item).__name__])
            for idx, v in enumerate(item):
                path_types.extend(cls._get_item_path_types(v, f'{prefix}/{idx}'))
        else:
            path_types.append([prefix, type(item).__name__])
        return path_types

    @classmethod
    def get_item_pathlist(cls, item, prefix='', **kwargs):
        """ Get the structural signature (flat path list) of an item.

        :param kwargs: overrides for entries of :attr:`default_cfg`
        """
        # 1 raw path/type pairs
        cfg = copy.copy(cls.default_cfg)
        cfg.update(kwargs)
        paths = cls._get_item_path_types(item, prefix)

        # 2 apply configuration
        if cfg['merge_path']:
            if callable(cfg['merge_path']):
                func = cfg['merge_path']
            else:
                def func(p, t):
                    return re.sub(r'\d+', '0', p), t

            # deduplicate while preserving order
            paths = list(unique_everseen(map(lambda x: func(x[0], x[1]), paths)))

        if not cfg['include_container']:
            paths = [pt for pt in paths if (pt[0][-1] != '/')]

        if cfg['value_type']:
            paths = ['='.join(pt) for pt in paths]
        else:
            paths = [pt[0] for pt in paths]

        return paths

    @classmethod
    def get_item_pathstr(cls, item, prefix='', **kwargs):
        """ Like get_item_pathlist, but joined into a single str. """
        paths = cls.get_item_pathlist(item, prefix, **kwargs)
        return '\n'.join(paths)

    @classmethod
    def get_items_struct2cnt(cls, items, **kwargs):
        """ Count how many items share each structure string. """
        # 1 count each structure's occurrences
        struct2cnt = Counter()
        for item in items:
            pathstr = cls.get_item_pathstr(item, **kwargs)
            struct2cnt[pathstr] += 1
        # 2 order from most to least frequent
        struct2cnt = Counter(dict(sorted(struct2cnt.items(), key=lambda kv: -kv[1])))
        return struct2cnt

    @classmethod
    def get_items_structdf(cls, items, **kwargs):
        """ Analyse how many distinct json structures appear among the items """
        # 1 raw data, initialisation
        struct2cnt = cls.get_items_struct2cnt(items, **kwargs)
        m = len(struct2cnt)

        # 2 path2cnt
        path2cnt = Counter()
        for struct in struct2cnt.keys():
            path2cnt.update({path: struct2cnt[struct] for path in struct.splitlines()})
        paths = sorted(path2cnt.keys(), key=lambda path: re.split(r'/=', path))
        path2cnt = {path: path2cnt[path] for path in paths}

        # 3 statistics table
        # bugfix: test membership against the exact path lines of each struct.
        # The old substring test (`path in struct`) falsely matched suffixes,
        # e.g. '/a=int' inside a struct containing '/b/a=int'.
        struct_lines = [(set(struct.splitlines()), cnt) for struct, cnt in struct2cnt.items()]
        ls = []
        columns = ['path', 'total'] + [f'struct{i}' for i in range(1, m + 1)]
        for path, total in path2cnt.items():
            row = [path, total]
            for lines, struct_cnt in struct_lines:
                row.append(struct_cnt if path in lines else 0)
            ls.append(row)

        df = pd.DataFrame.from_records(ls, columns=columns)
        return df

    @classmethod
    def get_itemgroups_structdf(cls, itemgroups, **kwargs):
        """ Analyse json-structure differences between several item groups.

        To avoid redundant development this mirrors get_items_structdf,
        even though that creates some functional overlap.
        """
        # 1 count every path's occurrences per group
        n = len(itemgroups)
        d = dict()
        for i, gs in enumerate(itemgroups):
            for x in gs:
                paths = cls.get_item_pathlist(x, **kwargs)
                for p in paths:
                    if p not in d:
                        d[p] = [0] * n
                    d[p][i] += 1

        # sort
        paths = sorted(d.keys(), key=lambda path: re.split(r'/=', path))

        # 2 statistics table
        ls = []
        columns = ['path', 'total'] + [f'group{i}' for i in range(1, n + 1)]
        for path in paths:
            vals = d[path]
            row = [path, sum(vals)] + vals
            ls.append(row)

        df = pd.DataFrame.from_records(ls, columns=columns)
        return df