pyxllib 0.3.96__py3-none-any.whl → 0.3.200__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (358)
  1. pyxllib/__init__.py +21 -21
  2. pyxllib/algo/__init__.py +8 -8
  3. pyxllib/algo/disjoint.py +54 -54
  4. pyxllib/algo/geo.py +541 -529
  5. pyxllib/algo/intervals.py +964 -964
  6. pyxllib/algo/matcher.py +389 -311
  7. pyxllib/algo/newbie.py +166 -166
  8. pyxllib/algo/pupil.py +629 -461
  9. pyxllib/algo/shapelylib.py +67 -67
  10. pyxllib/algo/specialist.py +241 -240
  11. pyxllib/algo/stat.py +494 -458
  12. pyxllib/algo/treelib.py +149 -149
  13. pyxllib/algo/unitlib.py +66 -66
  14. {pyxlpr → pyxllib/autogui}/__init__.py +5 -5
  15. pyxllib/autogui/activewin.py +246 -0
  16. pyxllib/autogui/all.py +9 -0
  17. pyxllib/{ext/autogui → autogui}/autogui.py +852 -823
  18. pyxllib/autogui/uiautolib.py +362 -0
  19. pyxllib/{ext/autogui → autogui}/virtualkey.py +102 -102
  20. pyxllib/autogui/wechat.py +827 -0
  21. pyxllib/autogui/wechat_msg.py +421 -0
  22. pyxllib/autogui/wxautolib.py +84 -0
  23. pyxllib/cv/__init__.py +5 -5
  24. pyxllib/cv/expert.py +267 -267
  25. pyxllib/cv/imfile.py +159 -159
  26. pyxllib/cv/imhash.py +39 -39
  27. pyxllib/cv/pupil.py +9 -9
  28. pyxllib/cv/rgbfmt.py +1525 -1525
  29. pyxllib/cv/slidercaptcha.py +137 -0
  30. pyxllib/cv/trackbartools.py +251 -251
  31. pyxllib/cv/xlcvlib.py +1040 -1040
  32. pyxllib/cv/xlpillib.py +423 -423
  33. pyxllib/data/echarts.py +240 -129
  34. pyxllib/data/jsonlib.py +89 -0
  35. pyxllib/data/oss.py +72 -72
  36. pyxllib/data/pglib.py +1127 -643
  37. pyxllib/data/sqlite.py +568 -341
  38. pyxllib/data/sqllib.py +297 -297
  39. pyxllib/ext/JLineViewer.py +505 -492
  40. pyxllib/ext/__init__.py +6 -6
  41. pyxllib/ext/demolib.py +246 -246
  42. pyxllib/ext/drissionlib.py +277 -0
  43. pyxllib/ext/kq5034lib.py +12 -1606
  44. pyxllib/ext/old.py +663 -663
  45. pyxllib/ext/qt.py +449 -449
  46. pyxllib/ext/robustprocfile.py +497 -0
  47. pyxllib/ext/seleniumlib.py +76 -76
  48. pyxllib/ext/tk.py +173 -173
  49. pyxllib/ext/unixlib.py +827 -826
  50. pyxllib/ext/utools.py +351 -338
  51. pyxllib/ext/webhook.py +124 -101
  52. pyxllib/ext/win32lib.py +40 -40
  53. pyxllib/ext/wjxlib.py +88 -0
  54. pyxllib/ext/wpsapi.py +124 -0
  55. pyxllib/ext/xlwork.py +9 -0
  56. pyxllib/ext/yuquelib.py +1105 -173
  57. pyxllib/file/__init__.py +17 -17
  58. pyxllib/file/docxlib.py +761 -761
  59. pyxllib/file/gitlib.py +309 -309
  60. pyxllib/file/libreoffice.py +165 -0
  61. pyxllib/file/movielib.py +148 -139
  62. pyxllib/file/newbie.py +10 -10
  63. pyxllib/file/onenotelib.py +1469 -1469
  64. pyxllib/file/packlib/__init__.py +330 -293
  65. pyxllib/file/packlib/zipfile.py +2441 -2441
  66. pyxllib/file/pdflib.py +426 -426
  67. pyxllib/file/pupil.py +185 -185
  68. pyxllib/file/specialist/__init__.py +685 -685
  69. pyxllib/file/specialist/dirlib.py +799 -799
  70. pyxllib/file/specialist/download.py +193 -186
  71. pyxllib/file/specialist/filelib.py +2829 -2618
  72. pyxllib/file/xlsxlib.py +3131 -2976
  73. pyxllib/file/xlsyncfile.py +341 -0
  74. pyxllib/prog/__init__.py +5 -5
  75. pyxllib/prog/cachetools.py +64 -0
  76. pyxllib/prog/deprecatedlib.py +233 -233
  77. pyxllib/prog/filelock.py +42 -0
  78. pyxllib/prog/ipyexec.py +253 -253
  79. pyxllib/prog/multiprogs.py +940 -0
  80. pyxllib/prog/newbie.py +451 -444
  81. pyxllib/prog/pupil.py +1197 -1128
  82. pyxllib/prog/sitepackages.py +33 -33
  83. pyxllib/prog/specialist/__init__.py +391 -217
  84. pyxllib/prog/specialist/bc.py +203 -200
  85. pyxllib/prog/specialist/browser.py +497 -488
  86. pyxllib/prog/specialist/common.py +347 -347
  87. pyxllib/prog/specialist/datetime.py +199 -131
  88. pyxllib/prog/specialist/tictoc.py +240 -241
  89. pyxllib/prog/specialist/xllog.py +180 -180
  90. pyxllib/prog/xlosenv.py +108 -101
  91. pyxllib/stdlib/__init__.py +17 -17
  92. pyxllib/stdlib/tablepyxl/__init__.py +10 -10
  93. pyxllib/stdlib/tablepyxl/style.py +303 -303
  94. pyxllib/stdlib/tablepyxl/tablepyxl.py +130 -130
  95. pyxllib/text/__init__.py +8 -8
  96. pyxllib/text/ahocorasick.py +39 -39
  97. pyxllib/text/airscript.js +744 -0
  98. pyxllib/text/charclasslib.py +121 -109
  99. pyxllib/text/jiebalib.py +267 -264
  100. pyxllib/text/jinjalib.py +32 -0
  101. pyxllib/text/jsa_ai_prompt.md +271 -0
  102. pyxllib/text/jscode.py +922 -767
  103. pyxllib/text/latex/__init__.py +158 -158
  104. pyxllib/text/levenshtein.py +303 -303
  105. pyxllib/text/nestenv.py +1215 -1215
  106. pyxllib/text/newbie.py +300 -288
  107. pyxllib/text/pupil/__init__.py +8 -8
  108. pyxllib/text/pupil/common.py +1121 -1095
  109. pyxllib/text/pupil/xlalign.py +326 -326
  110. pyxllib/text/pycode.py +47 -47
  111. pyxllib/text/specialist/__init__.py +8 -8
  112. pyxllib/text/specialist/common.py +112 -112
  113. pyxllib/text/specialist/ptag.py +186 -186
  114. pyxllib/text/spellchecker.py +172 -172
  115. pyxllib/text/templates/echart_base.html +11 -0
  116. pyxllib/text/templates/highlight_code.html +17 -0
  117. pyxllib/text/templates/latex_editor.html +103 -0
  118. pyxllib/text/vbacode.py +17 -17
  119. pyxllib/text/xmllib.py +747 -685
  120. pyxllib/xl.py +42 -38
  121. pyxllib/xlcv.py +17 -17
  122. pyxllib-0.3.200.dist-info/METADATA +48 -0
  123. pyxllib-0.3.200.dist-info/RECORD +126 -0
  124. {pyxllib-0.3.96.dist-info → pyxllib-0.3.200.dist-info}/WHEEL +1 -2
  125. {pyxllib-0.3.96.dist-info → pyxllib-0.3.200.dist-info/licenses}/LICENSE +190 -190
  126. pyxllib/ext/autogui/__init__.py +0 -8
  127. pyxllib-0.3.96.dist-info/METADATA +0 -51
  128. pyxllib-0.3.96.dist-info/RECORD +0 -333
  129. pyxllib-0.3.96.dist-info/top_level.txt +0 -2
  130. pyxlpr/ai/__init__.py +0 -5
  131. pyxlpr/ai/clientlib.py +0 -1281
  132. pyxlpr/ai/specialist.py +0 -286
  133. pyxlpr/ai/torch_app.py +0 -172
  134. pyxlpr/ai/xlpaddle.py +0 -655
  135. pyxlpr/ai/xltorch.py +0 -705
  136. pyxlpr/data/__init__.py +0 -11
  137. pyxlpr/data/coco.py +0 -1325
  138. pyxlpr/data/datacls.py +0 -365
  139. pyxlpr/data/datasets.py +0 -200
  140. pyxlpr/data/gptlib.py +0 -1291
  141. pyxlpr/data/icdar/__init__.py +0 -96
  142. pyxlpr/data/icdar/deteval.py +0 -377
  143. pyxlpr/data/icdar/icdar2013.py +0 -341
  144. pyxlpr/data/icdar/iou.py +0 -340
  145. pyxlpr/data/icdar/rrc_evaluation_funcs_1_1.py +0 -463
  146. pyxlpr/data/imtextline.py +0 -473
  147. pyxlpr/data/labelme.py +0 -866
  148. pyxlpr/data/removeline.py +0 -179
  149. pyxlpr/data/specialist.py +0 -57
  150. pyxlpr/eval/__init__.py +0 -85
  151. pyxlpr/paddleocr.py +0 -776
  152. pyxlpr/ppocr/__init__.py +0 -15
  153. pyxlpr/ppocr/configs/rec/multi_language/generate_multi_language_configs.py +0 -226
  154. pyxlpr/ppocr/data/__init__.py +0 -135
  155. pyxlpr/ppocr/data/imaug/ColorJitter.py +0 -26
  156. pyxlpr/ppocr/data/imaug/__init__.py +0 -67
  157. pyxlpr/ppocr/data/imaug/copy_paste.py +0 -170
  158. pyxlpr/ppocr/data/imaug/east_process.py +0 -437
  159. pyxlpr/ppocr/data/imaug/gen_table_mask.py +0 -244
  160. pyxlpr/ppocr/data/imaug/iaa_augment.py +0 -114
  161. pyxlpr/ppocr/data/imaug/label_ops.py +0 -789
  162. pyxlpr/ppocr/data/imaug/make_border_map.py +0 -184
  163. pyxlpr/ppocr/data/imaug/make_pse_gt.py +0 -106
  164. pyxlpr/ppocr/data/imaug/make_shrink_map.py +0 -126
  165. pyxlpr/ppocr/data/imaug/operators.py +0 -433
  166. pyxlpr/ppocr/data/imaug/pg_process.py +0 -906
  167. pyxlpr/ppocr/data/imaug/randaugment.py +0 -143
  168. pyxlpr/ppocr/data/imaug/random_crop_data.py +0 -239
  169. pyxlpr/ppocr/data/imaug/rec_img_aug.py +0 -533
  170. pyxlpr/ppocr/data/imaug/sast_process.py +0 -777
  171. pyxlpr/ppocr/data/imaug/text_image_aug/__init__.py +0 -17
  172. pyxlpr/ppocr/data/imaug/text_image_aug/augment.py +0 -120
  173. pyxlpr/ppocr/data/imaug/text_image_aug/warp_mls.py +0 -168
  174. pyxlpr/ppocr/data/lmdb_dataset.py +0 -115
  175. pyxlpr/ppocr/data/pgnet_dataset.py +0 -104
  176. pyxlpr/ppocr/data/pubtab_dataset.py +0 -107
  177. pyxlpr/ppocr/data/simple_dataset.py +0 -372
  178. pyxlpr/ppocr/losses/__init__.py +0 -61
  179. pyxlpr/ppocr/losses/ace_loss.py +0 -52
  180. pyxlpr/ppocr/losses/basic_loss.py +0 -135
  181. pyxlpr/ppocr/losses/center_loss.py +0 -88
  182. pyxlpr/ppocr/losses/cls_loss.py +0 -30
  183. pyxlpr/ppocr/losses/combined_loss.py +0 -67
  184. pyxlpr/ppocr/losses/det_basic_loss.py +0 -208
  185. pyxlpr/ppocr/losses/det_db_loss.py +0 -80
  186. pyxlpr/ppocr/losses/det_east_loss.py +0 -63
  187. pyxlpr/ppocr/losses/det_pse_loss.py +0 -149
  188. pyxlpr/ppocr/losses/det_sast_loss.py +0 -121
  189. pyxlpr/ppocr/losses/distillation_loss.py +0 -272
  190. pyxlpr/ppocr/losses/e2e_pg_loss.py +0 -140
  191. pyxlpr/ppocr/losses/kie_sdmgr_loss.py +0 -113
  192. pyxlpr/ppocr/losses/rec_aster_loss.py +0 -99
  193. pyxlpr/ppocr/losses/rec_att_loss.py +0 -39
  194. pyxlpr/ppocr/losses/rec_ctc_loss.py +0 -44
  195. pyxlpr/ppocr/losses/rec_enhanced_ctc_loss.py +0 -70
  196. pyxlpr/ppocr/losses/rec_nrtr_loss.py +0 -30
  197. pyxlpr/ppocr/losses/rec_sar_loss.py +0 -28
  198. pyxlpr/ppocr/losses/rec_srn_loss.py +0 -47
  199. pyxlpr/ppocr/losses/table_att_loss.py +0 -109
  200. pyxlpr/ppocr/metrics/__init__.py +0 -44
  201. pyxlpr/ppocr/metrics/cls_metric.py +0 -45
  202. pyxlpr/ppocr/metrics/det_metric.py +0 -82
  203. pyxlpr/ppocr/metrics/distillation_metric.py +0 -73
  204. pyxlpr/ppocr/metrics/e2e_metric.py +0 -86
  205. pyxlpr/ppocr/metrics/eval_det_iou.py +0 -274
  206. pyxlpr/ppocr/metrics/kie_metric.py +0 -70
  207. pyxlpr/ppocr/metrics/rec_metric.py +0 -75
  208. pyxlpr/ppocr/metrics/table_metric.py +0 -50
  209. pyxlpr/ppocr/modeling/architectures/__init__.py +0 -32
  210. pyxlpr/ppocr/modeling/architectures/base_model.py +0 -88
  211. pyxlpr/ppocr/modeling/architectures/distillation_model.py +0 -60
  212. pyxlpr/ppocr/modeling/backbones/__init__.py +0 -54
  213. pyxlpr/ppocr/modeling/backbones/det_mobilenet_v3.py +0 -268
  214. pyxlpr/ppocr/modeling/backbones/det_resnet_vd.py +0 -246
  215. pyxlpr/ppocr/modeling/backbones/det_resnet_vd_sast.py +0 -285
  216. pyxlpr/ppocr/modeling/backbones/e2e_resnet_vd_pg.py +0 -265
  217. pyxlpr/ppocr/modeling/backbones/kie_unet_sdmgr.py +0 -186
  218. pyxlpr/ppocr/modeling/backbones/rec_mobilenet_v3.py +0 -138
  219. pyxlpr/ppocr/modeling/backbones/rec_mv1_enhance.py +0 -258
  220. pyxlpr/ppocr/modeling/backbones/rec_nrtr_mtb.py +0 -48
  221. pyxlpr/ppocr/modeling/backbones/rec_resnet_31.py +0 -210
  222. pyxlpr/ppocr/modeling/backbones/rec_resnet_aster.py +0 -143
  223. pyxlpr/ppocr/modeling/backbones/rec_resnet_fpn.py +0 -307
  224. pyxlpr/ppocr/modeling/backbones/rec_resnet_vd.py +0 -286
  225. pyxlpr/ppocr/modeling/heads/__init__.py +0 -54
  226. pyxlpr/ppocr/modeling/heads/cls_head.py +0 -52
  227. pyxlpr/ppocr/modeling/heads/det_db_head.py +0 -118
  228. pyxlpr/ppocr/modeling/heads/det_east_head.py +0 -121
  229. pyxlpr/ppocr/modeling/heads/det_pse_head.py +0 -37
  230. pyxlpr/ppocr/modeling/heads/det_sast_head.py +0 -128
  231. pyxlpr/ppocr/modeling/heads/e2e_pg_head.py +0 -253
  232. pyxlpr/ppocr/modeling/heads/kie_sdmgr_head.py +0 -206
  233. pyxlpr/ppocr/modeling/heads/multiheadAttention.py +0 -163
  234. pyxlpr/ppocr/modeling/heads/rec_aster_head.py +0 -393
  235. pyxlpr/ppocr/modeling/heads/rec_att_head.py +0 -202
  236. pyxlpr/ppocr/modeling/heads/rec_ctc_head.py +0 -88
  237. pyxlpr/ppocr/modeling/heads/rec_nrtr_head.py +0 -826
  238. pyxlpr/ppocr/modeling/heads/rec_sar_head.py +0 -402
  239. pyxlpr/ppocr/modeling/heads/rec_srn_head.py +0 -280
  240. pyxlpr/ppocr/modeling/heads/self_attention.py +0 -406
  241. pyxlpr/ppocr/modeling/heads/table_att_head.py +0 -246
  242. pyxlpr/ppocr/modeling/necks/__init__.py +0 -32
  243. pyxlpr/ppocr/modeling/necks/db_fpn.py +0 -111
  244. pyxlpr/ppocr/modeling/necks/east_fpn.py +0 -188
  245. pyxlpr/ppocr/modeling/necks/fpn.py +0 -138
  246. pyxlpr/ppocr/modeling/necks/pg_fpn.py +0 -314
  247. pyxlpr/ppocr/modeling/necks/rnn.py +0 -92
  248. pyxlpr/ppocr/modeling/necks/sast_fpn.py +0 -284
  249. pyxlpr/ppocr/modeling/necks/table_fpn.py +0 -110
  250. pyxlpr/ppocr/modeling/transforms/__init__.py +0 -28
  251. pyxlpr/ppocr/modeling/transforms/stn.py +0 -135
  252. pyxlpr/ppocr/modeling/transforms/tps.py +0 -308
  253. pyxlpr/ppocr/modeling/transforms/tps_spatial_transformer.py +0 -156
  254. pyxlpr/ppocr/optimizer/__init__.py +0 -61
  255. pyxlpr/ppocr/optimizer/learning_rate.py +0 -228
  256. pyxlpr/ppocr/optimizer/lr_scheduler.py +0 -49
  257. pyxlpr/ppocr/optimizer/optimizer.py +0 -160
  258. pyxlpr/ppocr/optimizer/regularizer.py +0 -52
  259. pyxlpr/ppocr/postprocess/__init__.py +0 -55
  260. pyxlpr/ppocr/postprocess/cls_postprocess.py +0 -33
  261. pyxlpr/ppocr/postprocess/db_postprocess.py +0 -234
  262. pyxlpr/ppocr/postprocess/east_postprocess.py +0 -143
  263. pyxlpr/ppocr/postprocess/locality_aware_nms.py +0 -200
  264. pyxlpr/ppocr/postprocess/pg_postprocess.py +0 -52
  265. pyxlpr/ppocr/postprocess/pse_postprocess/__init__.py +0 -15
  266. pyxlpr/ppocr/postprocess/pse_postprocess/pse/__init__.py +0 -29
  267. pyxlpr/ppocr/postprocess/pse_postprocess/pse/setup.py +0 -14
  268. pyxlpr/ppocr/postprocess/pse_postprocess/pse_postprocess.py +0 -118
  269. pyxlpr/ppocr/postprocess/rec_postprocess.py +0 -654
  270. pyxlpr/ppocr/postprocess/sast_postprocess.py +0 -355
  271. pyxlpr/ppocr/tools/__init__.py +0 -14
  272. pyxlpr/ppocr/tools/eval.py +0 -83
  273. pyxlpr/ppocr/tools/export_center.py +0 -77
  274. pyxlpr/ppocr/tools/export_model.py +0 -129
  275. pyxlpr/ppocr/tools/infer/predict_cls.py +0 -151
  276. pyxlpr/ppocr/tools/infer/predict_det.py +0 -300
  277. pyxlpr/ppocr/tools/infer/predict_e2e.py +0 -169
  278. pyxlpr/ppocr/tools/infer/predict_rec.py +0 -414
  279. pyxlpr/ppocr/tools/infer/predict_system.py +0 -204
  280. pyxlpr/ppocr/tools/infer/utility.py +0 -629
  281. pyxlpr/ppocr/tools/infer_cls.py +0 -83
  282. pyxlpr/ppocr/tools/infer_det.py +0 -134
  283. pyxlpr/ppocr/tools/infer_e2e.py +0 -122
  284. pyxlpr/ppocr/tools/infer_kie.py +0 -153
  285. pyxlpr/ppocr/tools/infer_rec.py +0 -146
  286. pyxlpr/ppocr/tools/infer_table.py +0 -107
  287. pyxlpr/ppocr/tools/program.py +0 -596
  288. pyxlpr/ppocr/tools/test_hubserving.py +0 -117
  289. pyxlpr/ppocr/tools/train.py +0 -163
  290. pyxlpr/ppocr/tools/xlprog.py +0 -748
  291. pyxlpr/ppocr/utils/EN_symbol_dict.txt +0 -94
  292. pyxlpr/ppocr/utils/__init__.py +0 -24
  293. pyxlpr/ppocr/utils/dict/ar_dict.txt +0 -117
  294. pyxlpr/ppocr/utils/dict/arabic_dict.txt +0 -162
  295. pyxlpr/ppocr/utils/dict/be_dict.txt +0 -145
  296. pyxlpr/ppocr/utils/dict/bg_dict.txt +0 -140
  297. pyxlpr/ppocr/utils/dict/chinese_cht_dict.txt +0 -8421
  298. pyxlpr/ppocr/utils/dict/cyrillic_dict.txt +0 -163
  299. pyxlpr/ppocr/utils/dict/devanagari_dict.txt +0 -167
  300. pyxlpr/ppocr/utils/dict/en_dict.txt +0 -63
  301. pyxlpr/ppocr/utils/dict/fa_dict.txt +0 -136
  302. pyxlpr/ppocr/utils/dict/french_dict.txt +0 -136
  303. pyxlpr/ppocr/utils/dict/german_dict.txt +0 -143
  304. pyxlpr/ppocr/utils/dict/hi_dict.txt +0 -162
  305. pyxlpr/ppocr/utils/dict/it_dict.txt +0 -118
  306. pyxlpr/ppocr/utils/dict/japan_dict.txt +0 -4399
  307. pyxlpr/ppocr/utils/dict/ka_dict.txt +0 -153
  308. pyxlpr/ppocr/utils/dict/korean_dict.txt +0 -3688
  309. pyxlpr/ppocr/utils/dict/latin_dict.txt +0 -185
  310. pyxlpr/ppocr/utils/dict/mr_dict.txt +0 -153
  311. pyxlpr/ppocr/utils/dict/ne_dict.txt +0 -153
  312. pyxlpr/ppocr/utils/dict/oc_dict.txt +0 -96
  313. pyxlpr/ppocr/utils/dict/pu_dict.txt +0 -130
  314. pyxlpr/ppocr/utils/dict/rs_dict.txt +0 -91
  315. pyxlpr/ppocr/utils/dict/rsc_dict.txt +0 -134
  316. pyxlpr/ppocr/utils/dict/ru_dict.txt +0 -125
  317. pyxlpr/ppocr/utils/dict/ta_dict.txt +0 -128
  318. pyxlpr/ppocr/utils/dict/table_dict.txt +0 -277
  319. pyxlpr/ppocr/utils/dict/table_structure_dict.txt +0 -2759
  320. pyxlpr/ppocr/utils/dict/te_dict.txt +0 -151
  321. pyxlpr/ppocr/utils/dict/ug_dict.txt +0 -114
  322. pyxlpr/ppocr/utils/dict/uk_dict.txt +0 -142
  323. pyxlpr/ppocr/utils/dict/ur_dict.txt +0 -137
  324. pyxlpr/ppocr/utils/dict/xi_dict.txt +0 -110
  325. pyxlpr/ppocr/utils/dict90.txt +0 -90
  326. pyxlpr/ppocr/utils/e2e_metric/Deteval.py +0 -574
  327. pyxlpr/ppocr/utils/e2e_metric/polygon_fast.py +0 -83
  328. pyxlpr/ppocr/utils/e2e_utils/extract_batchsize.py +0 -87
  329. pyxlpr/ppocr/utils/e2e_utils/extract_textpoint_fast.py +0 -457
  330. pyxlpr/ppocr/utils/e2e_utils/extract_textpoint_slow.py +0 -592
  331. pyxlpr/ppocr/utils/e2e_utils/pgnet_pp_utils.py +0 -162
  332. pyxlpr/ppocr/utils/e2e_utils/visual.py +0 -162
  333. pyxlpr/ppocr/utils/en_dict.txt +0 -95
  334. pyxlpr/ppocr/utils/gen_label.py +0 -81
  335. pyxlpr/ppocr/utils/ic15_dict.txt +0 -36
  336. pyxlpr/ppocr/utils/iou.py +0 -54
  337. pyxlpr/ppocr/utils/logging.py +0 -69
  338. pyxlpr/ppocr/utils/network.py +0 -84
  339. pyxlpr/ppocr/utils/ppocr_keys_v1.txt +0 -6623
  340. pyxlpr/ppocr/utils/profiler.py +0 -110
  341. pyxlpr/ppocr/utils/save_load.py +0 -150
  342. pyxlpr/ppocr/utils/stats.py +0 -72
  343. pyxlpr/ppocr/utils/utility.py +0 -80
  344. pyxlpr/ppstructure/__init__.py +0 -13
  345. pyxlpr/ppstructure/predict_system.py +0 -187
  346. pyxlpr/ppstructure/table/__init__.py +0 -13
  347. pyxlpr/ppstructure/table/eval_table.py +0 -72
  348. pyxlpr/ppstructure/table/matcher.py +0 -192
  349. pyxlpr/ppstructure/table/predict_structure.py +0 -136
  350. pyxlpr/ppstructure/table/predict_table.py +0 -221
  351. pyxlpr/ppstructure/table/table_metric/__init__.py +0 -16
  352. pyxlpr/ppstructure/table/table_metric/parallel.py +0 -51
  353. pyxlpr/ppstructure/table/table_metric/table_metric.py +0 -247
  354. pyxlpr/ppstructure/table/tablepyxl/__init__.py +0 -13
  355. pyxlpr/ppstructure/table/tablepyxl/style.py +0 -283
  356. pyxlpr/ppstructure/table/tablepyxl/tablepyxl.py +0 -118
  357. pyxlpr/ppstructure/utility.py +0 -71
  358. pyxlpr/xlai.py +0 -10
pyxllib/data/sqllib.py CHANGED
@@ -1,297 +1,297 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : 陈坤泽
# @Email : 877362867@qq.com
# @Date : 2020/06/03 09:52

from pyxllib.prog.pupil import check_install_package

check_install_package('bidict')
check_install_package('sqlalchemy')
check_install_package('mysqlclient')

import math

from bidict import bidict
import pandas as pd
import sqlalchemy

from pyxllib.file.specialist import File

SQL_LIB_ACCOUNT_FILE = File(__file__).parent / 'sqllibaccount.pkl'


def create_account_df(file='sqllibaccount.pkl'):
    """Set your personal account and password here, and destroy the plaintext information after running."""
    df = pd.DataFrame.from_records([
        ['ckz', 'rm.sbsql.rds.aliyuncs.com', '', '', 'dddddd'],
        ['ckzlocal', '0.0.0.0', '', '', 'eeeeee'],
    ], columns=['index_name', 'host', 'port', 'user', 'passwd'])
    df['port'] = df['port'].replace('', '3306')  # default when no port is given
    df['user'] = df['user'].replace('', 'root')  # default when no user is given
    df['passwd'] = df['passwd'].replace('', '123456')  # default when no password is given
    df.set_index('index_name', inplace=True)
    File(file).write(df)


class SqlEngine:
    """Generic MySQL base class
    """

    def __init__(self, alias=None, database=None, *,
                 user='root', passwd='123456', host=None, port='3306',
                 connect_timeout=None, account_file_path=None):
        """ Initialization requires connecting to a database

        :param alias: short alias for the database, for quick access and to keep the plaintext password hidden
            When given, this overrides the user, passwd, host and port arguments.
            Aliases I have set up myself, for example:
                ckz, my personal database on Aliyun
                ckzlocal, the database running on this PC
        :param account_file_path: only effective together with alias
            Path of the pkl file that stores the account information; the pkl must be generated
            by code like create_account_df above.
            By default it is read from sqllibaccount.pkl in the same directory as this script.

        :param database: database name
            For example, some database names used in my teaching-research work:
                tr, teaching research
                tr_develop, teaching-research development data
                tr_test, teaching-research test data

        :param connect_timeout: seconds to wait before a connection times out
            If set, 2 seconds or more is recommended.

        :return:
        """

        # 1 Read address and account information
        if alias:
            if account_file_path is None:
                account_file_path = File(SQL_LIB_ACCOUNT_FILE)
            # dprint(alias,account_file_path)
            record = File(account_file_path).read().loc[alias]  # read account info from the file
            user, passwd, host, port = record.user, record.passwd, record.host, record.port

        # 2 'dialect+driver://username:password@host:port/database'
        address = f'mysql+mysqldb://{user}:{passwd}@{host}:{port}/{database}?charset=utf8mb4'
        # 3 Store members
        self.alias, self.database = alias, database
        connect_args = {"connect_timeout": connect_timeout} if connect_timeout else {}
        self.engine = sqlalchemy.create_engine(address, connect_args=connect_args)

    def query(self, sql, index_col=None, coerce_float=True, params=None,
              parse_dates=None, columns=None, chunksize=None):
        """Essentially the pd.read_sql function

        "All you need to know about pd.read_sql()" - 漫步量化 - CSDN blog:
            https://blog.csdn.net/The_Time_Runner/article/details/86601988

        Official docs: pandas.read_sql — pandas 0.25.1 documentation:
            https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_sql.html

        :param sql: command string; %s placeholders are supported
            My extension here:
                single statement: '...'
                multiple statements: ['...', '...'], joined with \n by default
        :param index_col: column(s) to set as the index
        :param coerce_float: columns to coerce to float type
        :param params: parameters for the sql command; the exact syntax depends on the engine in use,
            here sqlalchemy, which supports:
                'SELECT point_name FROM tr_point LIMIT %s', params=(5,)  # list, tuple etc., one value per %s
                'SELECT point_name FROM tr_point LIMIT %(n)s', params={'n': 5}  # dict for named parameters
        :param parse_dates: columns to parse as datetime
        :param columns: columns to select; rarely useful, since the sql command usually already picks the columns
        :param chunksize: if an integer is given, a generator is returned that yields that many rows per batch
        :return: data as a DataFrame
        """
        # 1 Merge the sql commands
        if isinstance(sql, str): sql = [sql]
        sql = '\n'.join(sql)

        # Special symbols containing % need escaping
        # import sqlalchemy
        # sql = sqlalchemy.text(sql)

        # 2 Parse the result
        res = pd.read_sql(sql, self.engine, index_col=index_col, coerce_float=coerce_float, params=params,
                          parse_dates=parse_dates, columns=columns, chunksize=chunksize)
        return res

    def execute(self, statement, *multiparams, **params):
        """Essentially a wrapper around sqlalchemy con.execute

        Can be used like this:
            hsql.execute('UPDATE spell_check SET count=:count WHERE old=:old AND new=:new',
                         count=count[0] + add, old=old, new=new)
        """
        # 1 Parse the sql command
        if isinstance(statement, str): statement = [statement]
        statement = '\n'.join(statement)
        statement = sqlalchemy.text(statement)

        # 2 If the getdf parameter is set
        res = self.engine.execute(statement, *multiparams, **params)
        return res

    def insert_from_df(self, df, table_name, patch_size=100, if_exists='append'):
        """Write df into the table_name table of the connected database

        190731 Wed 18:51, TODO
            Could simply use: df.to_sql('formula_stat', HistudySQL('dev', 'tr_develop').con, if_exists='replace')
        191017 Thu 10:21, this function keeps being rewritten and still has not been tested rigorously~~

        Development of this function should reference DataFrame.to_sql();
        it exists because that function's con parameter apparently does not support pymysql.

        :param df: tabular data of DataFrame type
        :param table_name: name of the table to write into
        :param patch_size: number of rows written per batch
            If df is large it cannot be inserted with a single sql statement and usually
            has to be written in batches; patch_size sets how many rows go into each batch.
        :param if_exists: {'fail', 'replace', 'append'}, default 'append'
            How to behave if the table already exists.

            * fail: Raise a ValueError.
            * replace: Drop the table before inserting new values.
            * append: Insert new values to the existing table.
        """
        con = self.engine
        # TODO add a check whether the table exists; this function essentially only inserts into an existing table
        if if_exists == 'append':
            pass
        elif if_exists == 'replace':
            con.query(f'TRUNCATE TABLE {table_name}')
        elif if_exists == 'fail':
            raise ValueError('表格已存在')
        else:
            raise NotImplementedError

        # 1 Drop df columns that the table does not have
        cols = pd.read_sql(f'SHOW COLUMNS FROM {table_name}', con)['Field']
        cols = list(set(df.columns) & set(cols))
        df = df[cols]

        # 2 Convert each row of df into mysql statement text
        data = []  # data[i] is the sql text of the i-th row

        # Apart from nan, bool and None values all convert fine
        def func(x):
            # s = con.escape(str(x))
            s = x
            if s == 'nan': s = 'NULL'  # convert nan to NULL
            return s

        for idx, row in df.iterrows():
            t = ', '.join(map(func, row))
            data.append('(' + t + ')')

        # 3 Import in batches
        columns = '( ' + ', '.join(cols) + ' )'
        for j in range(0, math.ceil(len(data) / patch_size)):
            subdata = ',\n'.join(data[j * patch_size:(j + 1) * patch_size])
            con.execute("INSERT IGNORE INTO :a :b VALUES :c",
                        a=table_name, b=columns, c=subdata)
            con.commit()  # changes only take effect after the commit


class SqlCodeGenerator:
    @staticmethod
    def keys_count(table, keys):
        codes = [f'-- 分析{table}表中,{keys}出现的种类和次数,按照出现次数从多到少排序',
                 f'SELECT {keys}, COUNT(*) cnt FROM {table} GROUP BY {keys} ORDER BY cnt DESC']
        return '\n'.join(codes)

    @staticmethod
    def one2many(table, keys, vars):
        codes = [f'-- 分析{table}表中,{keys}构成的键,对应{vars}构成的值,是否有一对多的关系,按多到少排序',
                 f'SELECT {keys}, COUNT(DISTINCT {vars}) cnt',
                 f'FROM {table} GROUP BY {keys}',
                 'HAVING cnt > 1 ORDER BY cnt DESC']
        return '\n'.join(codes)


def demo_sqlengine():
    db = SqlEngine('ckz', 'runoob')
    df = db.query('SELECT * FROM apps')
    print(df)


class MultiEnumTable:
    """Bidirectional mapping over several enum tables
    Currently used to map enum values in database tables, but generic enough for many other uses

    >>> met = MultiEnumTable()
    >>> met.add_enum_table('subject', [5, 8, 6], ['语文', '数学', '英语'])
    >>> met.add_enum_table_from_dict('grade', {1: '小学', 2: '初中', 3: '高中'})

    >>> met['subject'][6]
    '英语'
    >>> met['subject'].inverse['英语']
    6

    >>> met.decode('subject', 5)
    '语文'
    >>> met.encode('subject', '数学')
    8

    >>> met.decodes('grade', [1, 3, 3, 2, 1])
    ['小学', '高中', '高中', '初中', '小学']
    >>> met.encodes('grade', ['小学', '高中', '大学', '初中', '小学'])
    [1, 3, None, 2, 1]
    """

    def __init__(self):
        self.enum_tables = dict()

    def __getitem__(self, table):
        return self.enum_tables[table]

    def add_enum_table(self, table, ids, values):
        """Add a mapping table"""
        self.enum_tables[table] = bidict({k: v for k, v in zip(ids, values)})

    def add_enum_table_from_dict(self, table, d):
        self.enum_tables[table] = bidict({k: v for k, v in d.items()})

    def set_alias(self, table, alias):
        """Register additional alias names for an existing table
        :param alias: list
        """
        for a in alias:
            self.enum_tables[a] = self.enum_tables[table]

    def decode(self, table, id_, default=None):
        """Convert id to its plaintext value"""
        return self.enum_tables[table].get(id_, default)

    def encode(self, table, value, default=None):
        """Convert value to its id"""
        return self.enum_tables[table].inverse.get(value, default)

    def decodes(self, table, ids, default=None):
        d = self.enum_tables[table]
        return [d.get(k, default) for k in ids]

    def encodes(self, table, values, default=None):
        d = self.enum_tables[table].inverse
        return [d.get(v, default) for v in values]


def adjust_repeat_data(li, suffix='+'):
    """ Scan the values in the sequence li and mark repeated values so they become unique
    :param li: list, each element is usually a str
    :param suffix: suffix appended to de-duplicate a value
    :return: a new li without duplicate values

    >>> adjust_repeat_data(['a', 'b', 'a', 'c'])
    ['a', 'b', 'a+', 'c']
    """
    res = []
    values = set()
    for x in li:
        while x in values:
            x += suffix
            # print(x)
        res.append(x)
        values.add(x)

    return res
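Pieced together from the docstrings above, the alias-based workflow amounts to roughly the following. This is a minimal sketch rather than part of the package: the alias 'ckz', the database 'tr_develop' and the tr_point table are simply the examples the docstrings themselves mention, and it assumes a reachable MySQL server with credentials of your own.

from pyxllib.data.sqllib import SqlEngine, create_account_df

# One-off step: edit create_account_df so it holds your real host/user/passwd,
# run it to write the pickle, then destroy the plaintext as its docstring advises.
create_account_df('sqllibaccount.pkl')

# Afterwards a connection only needs the alias plus a database name;
# account_file_path points at the pickle written above (by default the class
# looks for sqllibaccount.pkl next to the module itself).
db = SqlEngine('ckz', 'tr_develop', account_file_path='sqllibaccount.pkl')
df = db.query('SELECT point_name FROM tr_point LIMIT %s', params=(5,))
print(df)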
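For a quick sense of what SqlCodeGenerator produces, the call below reuses the tr_point/point_name names from the query docstring purely as an illustration and prints the two-line statement that keys_count builds.

from pyxllib.data.sqllib import SqlCodeGenerator

print(SqlCodeGenerator.keys_count('tr_point', 'point_name'))
# -- 分析tr_point表中,point_name出现的种类和次数,按照出现次数从多到少排序
# SELECT point_name, COUNT(*) cnt FROM tr_point GROUP BY point_name ORDER BY cnt DESC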