re_common-10.0.39-py3-none-any.whl → re_common-10.0.41-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (221)
  1. re_common/baselibrary/__init__.py +4 -4
  2. re_common/baselibrary/baseabs/__init__.py +6 -6
  3. re_common/baselibrary/baseabs/baseabs.py +26 -26
  4. re_common/baselibrary/database/mbuilder.py +132 -132
  5. re_common/baselibrary/database/moudle.py +93 -93
  6. re_common/baselibrary/database/msqlite3.py +194 -194
  7. re_common/baselibrary/database/mysql.py +169 -169
  8. re_common/baselibrary/database/sql_factory.py +26 -26
  9. re_common/baselibrary/mthread/MThreadingRun.py +486 -486
  10. re_common/baselibrary/mthread/MThreadingRunEvent.py +349 -349
  11. re_common/baselibrary/mthread/__init__.py +2 -2
  12. re_common/baselibrary/mthread/mythreading.py +695 -695
  13. re_common/baselibrary/pakge_other/socks.py +404 -404
  14. re_common/baselibrary/readconfig/config_factory.py +18 -18
  15. re_common/baselibrary/readconfig/ini_config.py +317 -317
  16. re_common/baselibrary/readconfig/toml_config.py +49 -49
  17. re_common/baselibrary/temporary/envdata.py +36 -36
  18. re_common/baselibrary/tools/all_requests/aiohttp_request.py +118 -118
  19. re_common/baselibrary/tools/all_requests/httpx_requet.py +102 -102
  20. re_common/baselibrary/tools/all_requests/mrequest.py +412 -412
  21. re_common/baselibrary/tools/all_requests/requests_request.py +81 -81
  22. re_common/baselibrary/tools/batch_compre/bijiao_batch.py +31 -31
  23. re_common/baselibrary/tools/contrast_db3.py +123 -123
  24. re_common/baselibrary/tools/copy_file.py +39 -39
  25. re_common/baselibrary/tools/db3_2_sizedb3.py +102 -102
  26. re_common/baselibrary/tools/foreachgz.py +39 -39
  27. re_common/baselibrary/tools/get_attr.py +10 -10
  28. re_common/baselibrary/tools/image_to_pdf.py +61 -61
  29. re_common/baselibrary/tools/java_code_deal.py +139 -139
  30. re_common/baselibrary/tools/javacode.py +79 -79
  31. re_common/baselibrary/tools/mdb_db3.py +48 -48
  32. re_common/baselibrary/tools/merge_file.py +171 -171
  33. re_common/baselibrary/tools/merge_gz_file.py +165 -165
  34. re_common/baselibrary/tools/mhdfstools/down_hdfs_files.py +42 -42
  35. re_common/baselibrary/tools/mhdfstools/hdfst.py +42 -42
  36. re_common/baselibrary/tools/mhdfstools/up_hdfs_files.py +38 -38
  37. re_common/baselibrary/tools/mongo_tools.py +50 -50
  38. re_common/baselibrary/tools/move_file.py +170 -170
  39. re_common/baselibrary/tools/move_mongo/mongo_table_to_file.py +63 -63
  40. re_common/baselibrary/tools/move_mongo/move_mongo_table.py +354 -354
  41. re_common/baselibrary/tools/move_mongo/use_mttf.py +18 -18
  42. re_common/baselibrary/tools/move_mongo/use_mv.py +93 -93
  43. re_common/baselibrary/tools/mpandas/mpandasreadexcel.py +125 -125
  44. re_common/baselibrary/tools/mpandas/pandas_visualization.py +7 -7
  45. re_common/baselibrary/tools/myparsel.py +104 -104
  46. re_common/baselibrary/tools/rename_dir_file.py +37 -37
  47. re_common/baselibrary/tools/sequoiadb_utils.py +398 -398
  48. re_common/baselibrary/tools/split_line_to_many.py +25 -25
  49. re_common/baselibrary/tools/stringtodicts.py +33 -33
  50. re_common/baselibrary/tools/workwechant_bot.py +84 -84
  51. re_common/baselibrary/utils/baseaiohttp.py +296 -296
  52. re_common/baselibrary/utils/baseaiomysql.py +87 -87
  53. re_common/baselibrary/utils/baseallstep.py +191 -191
  54. re_common/baselibrary/utils/baseavro.py +19 -19
  55. re_common/baselibrary/utils/baseboto3.py +291 -291
  56. re_common/baselibrary/utils/basecsv.py +32 -32
  57. re_common/baselibrary/utils/basedict.py +133 -133
  58. re_common/baselibrary/utils/basedir.py +241 -241
  59. re_common/baselibrary/utils/baseencode.py +351 -351
  60. re_common/baselibrary/utils/baseencoding.py +28 -28
  61. re_common/baselibrary/utils/baseesdsl.py +86 -86
  62. re_common/baselibrary/utils/baseexcel.py +264 -264
  63. re_common/baselibrary/utils/baseexcept.py +109 -109
  64. re_common/baselibrary/utils/basefile.py +654 -654
  65. re_common/baselibrary/utils/baseftp.py +214 -214
  66. re_common/baselibrary/utils/basegzip.py +60 -60
  67. re_common/baselibrary/utils/basehdfs.py +135 -135
  68. re_common/baselibrary/utils/basehttpx.py +268 -268
  69. re_common/baselibrary/utils/baseip.py +87 -87
  70. re_common/baselibrary/utils/basejson.py +2 -2
  71. re_common/baselibrary/utils/baselist.py +32 -32
  72. re_common/baselibrary/utils/basemotor.py +190 -190
  73. re_common/baselibrary/utils/basemssql.py +98 -98
  74. re_common/baselibrary/utils/baseodbc.py +113 -113
  75. re_common/baselibrary/utils/basepandas.py +302 -302
  76. re_common/baselibrary/utils/basepeewee.py +11 -11
  77. re_common/baselibrary/utils/basepika.py +180 -180
  78. re_common/baselibrary/utils/basepydash.py +143 -143
  79. re_common/baselibrary/utils/basepymongo.py +230 -230
  80. re_common/baselibrary/utils/basequeue.py +22 -22
  81. re_common/baselibrary/utils/baserar.py +57 -57
  82. re_common/baselibrary/utils/baserequest.py +279 -279
  83. re_common/baselibrary/utils/baseset.py +8 -8
  84. re_common/baselibrary/utils/basesmb.py +403 -403
  85. re_common/baselibrary/utils/basestring.py +382 -382
  86. re_common/baselibrary/utils/basetime.py +320 -320
  87. re_common/baselibrary/utils/baseurl.py +121 -121
  88. re_common/baselibrary/utils/basezip.py +57 -57
  89. re_common/baselibrary/utils/core/__init__.py +7 -7
  90. re_common/baselibrary/utils/core/bottomutils.py +18 -18
  91. re_common/baselibrary/utils/core/mdeprecated.py +327 -327
  92. re_common/baselibrary/utils/core/mlamada.py +16 -16
  93. re_common/baselibrary/utils/core/msginfo.py +25 -25
  94. re_common/baselibrary/utils/core/requests_core.py +103 -103
  95. re_common/baselibrary/utils/fateadm.py +429 -429
  96. re_common/baselibrary/utils/importfun.py +123 -123
  97. re_common/baselibrary/utils/mfaker.py +57 -57
  98. re_common/baselibrary/utils/my_abc/__init__.py +3 -3
  99. re_common/baselibrary/utils/my_abc/better_abc.py +32 -32
  100. re_common/baselibrary/utils/mylogger.py +414 -414
  101. re_common/baselibrary/utils/myredisclient.py +861 -861
  102. re_common/baselibrary/utils/pipupgrade.py +21 -21
  103. re_common/baselibrary/utils/ringlist.py +85 -85
  104. re_common/baselibrary/utils/version_compare.py +36 -36
  105. re_common/baselibrary/utils/ydmhttp.py +126 -126
  106. re_common/facade/lazy_import.py +11 -11
  107. re_common/facade/loggerfacade.py +25 -25
  108. re_common/facade/mysqlfacade.py +467 -467
  109. re_common/facade/now.py +31 -31
  110. re_common/facade/sqlite3facade.py +257 -257
  111. re_common/facade/use/mq_use_facade.py +83 -83
  112. re_common/facade/use/proxy_use_facade.py +19 -19
  113. re_common/libtest/base_dict_test.py +19 -19
  114. re_common/libtest/baseavro_test.py +13 -13
  115. re_common/libtest/basefile_test.py +14 -14
  116. re_common/libtest/basemssql_test.py +77 -77
  117. re_common/libtest/baseodbc_test.py +7 -7
  118. re_common/libtest/basepandas_test.py +38 -38
  119. re_common/libtest/get_attr_test/get_attr_test_settings.py +14 -14
  120. re_common/libtest/get_attr_test/settings.py +54 -54
  121. re_common/libtest/idencode_test.py +53 -53
  122. re_common/libtest/iniconfig_test.py +35 -35
  123. re_common/libtest/ip_test.py +34 -34
  124. re_common/libtest/merge_file_test.py +20 -20
  125. re_common/libtest/mfaker_test.py +8 -8
  126. re_common/libtest/mm3_test.py +31 -31
  127. re_common/libtest/mylogger_test.py +88 -88
  128. re_common/libtest/myparsel_test.py +27 -27
  129. re_common/libtest/mysql_test.py +151 -151
  130. re_common/libtest/pymongo_test.py +21 -21
  131. re_common/libtest/split_test.py +11 -11
  132. re_common/libtest/sqlite3_merge_test.py +5 -5
  133. re_common/libtest/sqlite3_test.py +34 -34
  134. re_common/libtest/tomlconfig_test.py +30 -30
  135. re_common/libtest/use_tools_test/__init__.py +2 -2
  136. re_common/libtest/user/__init__.py +4 -4
  137. re_common/studio/__init__.py +4 -4
  138. re_common/studio/assignment_expressions.py +36 -36
  139. re_common/studio/mydash/test1.py +18 -18
  140. re_common/studio/pydashstudio/first.py +9 -9
  141. re_common/studio/streamlitstudio/first_app.py +65 -65
  142. re_common/studio/streamlitstudio/uber_pickups.py +23 -23
  143. re_common/studio/test.py +18 -18
  144. re_common/v2/baselibrary/business_utils/BusinessStringUtil.py +235 -220
  145. re_common/v2/baselibrary/business_utils/baseencodeid.py +100 -100
  146. re_common/v2/baselibrary/business_utils/full_doi_path.py +116 -116
  147. re_common/v2/baselibrary/business_utils/rel_tools.py +6 -6
  148. re_common/v2/baselibrary/decorators/utils.py +59 -59
  149. re_common/v2/baselibrary/helpers/search_packge/NearestNeighbors_test.py +105 -105
  150. re_common/v2/baselibrary/helpers/search_packge/fit_text_match.py +253 -253
  151. re_common/v2/baselibrary/helpers/search_packge/scikit_learn_text_matcher.py +260 -260
  152. re_common/v2/baselibrary/helpers/search_packge/test.py +1 -1
  153. re_common/v2/baselibrary/s3object/baseboto3.py +230 -230
  154. re_common/v2/baselibrary/tools/WeChatRobot.py +95 -95
  155. re_common/v2/baselibrary/tools/ac_ahocorasick.py +75 -75
  156. re_common/v2/baselibrary/tools/concurrency.py +35 -35
  157. re_common/v2/baselibrary/tools/data_processer/base.py +53 -53
  158. re_common/v2/baselibrary/tools/data_processer/data_processer.py +497 -508
  159. re_common/v2/baselibrary/tools/data_processer/data_reader.py +187 -187
  160. re_common/v2/baselibrary/tools/data_processer/data_writer.py +38 -38
  161. re_common/v2/baselibrary/tools/dict_tools.py +44 -44
  162. re_common/v2/baselibrary/tools/dolphinscheduler.py +187 -187
  163. re_common/v2/baselibrary/tools/hdfs_base_processor.py +204 -204
  164. re_common/v2/baselibrary/tools/hdfs_bulk_processor.py +67 -67
  165. re_common/v2/baselibrary/tools/hdfs_data_processer.py +338 -338
  166. re_common/v2/baselibrary/tools/hdfs_line_processor.py +74 -74
  167. re_common/v2/baselibrary/tools/list_tools.py +69 -69
  168. re_common/v2/baselibrary/tools/resume_tracker.py +94 -94
  169. re_common/v2/baselibrary/tools/search_hash_tools.py +54 -54
  170. re_common/v2/baselibrary/tools/text_matcher.py +326 -326
  171. re_common/v2/baselibrary/tools/tree_processor/__init__.py +0 -0
  172. re_common/v2/baselibrary/tools/tree_processor/builder.py +25 -0
  173. re_common/v2/baselibrary/tools/tree_processor/node.py +13 -0
  174. re_common/v2/baselibrary/tools/unionfind_tools.py +60 -60
  175. re_common/v2/baselibrary/utils/BusinessStringUtil.py +196 -196
  176. re_common/v2/baselibrary/utils/api_net_utils.py +270 -270
  177. re_common/v2/baselibrary/utils/author_smi.py +361 -361
  178. re_common/v2/baselibrary/utils/base_string_similarity.py +158 -158
  179. re_common/v2/baselibrary/utils/basedict.py +37 -37
  180. re_common/v2/baselibrary/utils/basehdfs.py +163 -163
  181. re_common/v2/baselibrary/utils/basepika.py +180 -180
  182. re_common/v2/baselibrary/utils/basetime.py +94 -77
  183. re_common/v2/baselibrary/utils/db.py +174 -156
  184. re_common/v2/baselibrary/utils/elasticsearch.py +46 -0
  185. re_common/v2/baselibrary/utils/json_cls.py +16 -16
  186. re_common/v2/baselibrary/utils/mq.py +83 -83
  187. re_common/v2/baselibrary/utils/n_ary_expression_tree.py +243 -243
  188. re_common/v2/baselibrary/utils/string_bool.py +187 -186
  189. re_common/v2/baselibrary/utils/string_clear.py +246 -246
  190. re_common/v2/baselibrary/utils/string_smi.py +18 -18
  191. re_common/v2/baselibrary/utils/stringutils.py +312 -271
  192. re_common/vip/base_step_process.py +11 -11
  193. re_common/vip/baseencodeid.py +90 -90
  194. re_common/vip/changetaskname.py +28 -28
  195. re_common/vip/core_var.py +24 -24
  196. re_common/vip/mmh3Hash.py +89 -89
  197. re_common/vip/proxy/allproxys.py +127 -127
  198. re_common/vip/proxy/allproxys_thread.py +159 -159
  199. re_common/vip/proxy/cnki_proxy.py +153 -153
  200. re_common/vip/proxy/kuaidaili.py +87 -87
  201. re_common/vip/proxy/proxy_all.py +113 -113
  202. re_common/vip/proxy/update_kuaidaili_0.py +42 -42
  203. re_common/vip/proxy/wanfang_proxy.py +152 -152
  204. re_common/vip/proxy/wp_proxy_all.py +181 -181
  205. re_common/vip/read_rawid_to_txt.py +91 -91
  206. re_common/vip/title/__init__.py +5 -5
  207. re_common/vip/title/transform/TransformBookTitleToZt.py +125 -125
  208. re_common/vip/title/transform/TransformConferenceTitleToZt.py +139 -139
  209. re_common/vip/title/transform/TransformCstadTitleToZt.py +195 -195
  210. re_common/vip/title/transform/TransformJournalTitleToZt.py +203 -203
  211. re_common/vip/title/transform/TransformPatentTitleToZt.py +132 -132
  212. re_common/vip/title/transform/TransformRegulationTitleToZt.py +114 -114
  213. re_common/vip/title/transform/TransformStandardTitleToZt.py +135 -135
  214. re_common/vip/title/transform/TransformThesisTitleToZt.py +135 -135
  215. re_common/vip/title/transform/__init__.py +10 -10
  216. {re_common-10.0.39.dist-info → re_common-10.0.41.dist-info}/LICENSE +201 -201
  217. {re_common-10.0.39.dist-info → re_common-10.0.41.dist-info}/METADATA +16 -16
  218. re_common-10.0.41.dist-info/RECORD +252 -0
  219. {re_common-10.0.39.dist-info → re_common-10.0.41.dist-info}/WHEEL +1 -1
  220. re_common-10.0.39.dist-info/RECORD +0 -248
  221. {re_common-10.0.39.dist-info → re_common-10.0.41.dist-info}/top_level.txt +0 -0
re_common/v2/baselibrary/tools/hdfs_line_processor.py
@@ -1,74 +1,74 @@
- import asyncio
-
- from pathlib import Path
- import time
- from typing import Any, Awaitable, Callable
- from re_common.v2.baselibrary.tools.concurrency import AsyncTaskPool
- from re_common.v2.baselibrary.tools.hdfs_base_processor import HDFSBaseProcessor
-
-
- class HDFSLineProcessor(HDFSBaseProcessor):
-     async def _process_data(self, data, process_func, pool):
-         """Process the data and execute the processing function."""
-         retry_count = 0
-         while retry_count < self.retries:
-             try:
-                 return await process_func(data, pool)  # exit once processing succeeds
-             except Exception as e:
-                 retry_count += 1
-                 print(f"Error while processing data: {e}, retrying {retry_count}/{self.retries}, data: {data}")
-                 await asyncio.sleep(2**retry_count)
-         raise Exception(f"Failed to process data, retry limit reached, data: {data}")
-
-     async def _process_file(self, hdfs_file_path, process_func):
-         """Process a single gz file."""
-         start_time = time.perf_counter()
-         total_lines = self._count_total_lines(hdfs_file_path)
-         processed_lines = 0
-         pool = await self._get_pool()
-         results = []
-
-         for lines in self._batch_read_gz(hdfs_file_path):
-             processing_start_time = time.perf_counter()  # record the start time of this batch
-
-             tasks = [self._process_data(line, process_func, pool) for line in lines]
-             results.extend(await AsyncTaskPool(self.concurrency).run(tasks))
-
-             processed_lines += len(lines)
-
-             self._print_progress(hdfs_file_path, processed_lines, total_lines, start_time, processing_start_time)
-
-         if self.result_dir is not None:
-             self.client.write(
-                 self.result_dir.rstrip("/") + f"/{Path(hdfs_file_path).stem}",
-                 data=self._generate_write_data(results),
-                 overwrite=True,
-                 encoding=self.encoding,
-             )
-
-         # final progress display
-         self._print_final_progress(hdfs_file_path, processed_lines, total_lines, start_time)
-
-     async def map(self, process_func: Callable[[str, Any], Awaitable[Any]]) -> None:
-         gz_files = self._list_gz_files()
-         await self._run_multi_process(gz_files, process_func)
-
-
- # async def test_func(line: str, pool):
- #     pass
-
-
- # async def main():
- #     await HDFSLineProcessor(
- #         "/xx/xx",
- #         db_path=Path(__file__).parent / "test.db",
- #         concurrency=200,
- #         batch_size=1000,
- #         pool_factory=get_pool,
- #         max_processes=2,
- #         result_dir="/xx/xx_res",
- #     ).map(test_func)
-
-
- # if __name__ == "__main__":
- #     asyncio.run(main())
+ import asyncio
+
+ from pathlib import Path
+ import time
+ from typing import Any, Awaitable, Callable
+ from re_common.v2.baselibrary.tools.concurrency import AsyncTaskPool
+ from re_common.v2.baselibrary.tools.hdfs_base_processor import HDFSBaseProcessor
+
+
+ class HDFSLineProcessor(HDFSBaseProcessor):
+     async def _process_data(self, data, process_func, pool):
+         """Process the data and execute the processing function."""
+         retry_count = 0
+         while retry_count < self.retries:
+             try:
+                 return await process_func(data, pool)  # exit once processing succeeds
+             except Exception as e:
+                 retry_count += 1
+                 print(f"Error while processing data: {e}, retrying {retry_count}/{self.retries}, data: {data}")
+                 await asyncio.sleep(2**retry_count)
+         raise Exception(f"Failed to process data, retry limit reached, data: {data}")
+
+     async def _process_file(self, hdfs_file_path, process_func):
+         """Process a single gz file."""
+         start_time = time.perf_counter()
+         total_lines = self._count_total_lines(hdfs_file_path)
+         processed_lines = 0
+         pool = await self._get_pool()
+         results = []
+
+         for lines in self._batch_read_gz(hdfs_file_path):
+             processing_start_time = time.perf_counter()  # record the start time of this batch
+
+             tasks = [self._process_data(line, process_func, pool) for line in lines]
+             results.extend(await AsyncTaskPool(self.concurrency).run(tasks))
+
+             processed_lines += len(lines)
+
+             self._print_progress(hdfs_file_path, processed_lines, total_lines, start_time, processing_start_time)
+
+         if self.result_dir is not None:
+             self.client.write(
+                 self.result_dir.rstrip("/") + f"/{Path(hdfs_file_path).stem}",
+                 data=self._generate_write_data(results),
+                 overwrite=True,
+                 encoding=self.encoding,
+             )
+
+         # final progress display
+         self._print_final_progress(hdfs_file_path, processed_lines, total_lines, start_time)
+
+     async def map(self, process_func: Callable[[str, Any], Awaitable[Any]]) -> None:
+         gz_files = self._list_gz_files()
+         await self._run_multi_process(gz_files, process_func)
+
+
+ # async def test_func(line: str, pool):
+ #     pass
+
+
+ # async def main():
+ #     await HDFSLineProcessor(
+ #         "/xx/xx",
+ #         db_path=Path(__file__).parent / "test.db",
+ #         concurrency=200,
+ #         batch_size=1000,
+ #         pool_factory=get_pool,
+ #         max_processes=2,
+ #         result_dir="/xx/xx_res",
+ #     ).map(test_func)
+
+
+ # if __name__ == "__main__":
+ #     asyncio.run(main())
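The contract that map expects of process_func can be read off _process_data above: an awaitable called once per line together with the shared pool, retried with exponential backoff on failure, whose return value is collected into results and, when result_dir is set, written back via _generate_write_data. A minimal sketch of a compatible handler follows; the JSON payload and the asyncpg-style pool API are illustrative assumptions, not part of re_common:

    import json

    # Hypothetical per-line handler for HDFSLineProcessor.map.
    # Assumptions: each line is a JSON object, and the pool passed in is
    # asyncpg-style (acquire/fetchrow) — adapt to whatever pool_factory returns.
    async def handle_line(line: str, pool):
        record = json.loads(line)
        async with pool.acquire() as conn:
            row = await conn.fetchrow("SELECT status FROM docs WHERE id = $1", record["id"])
        # Raising here triggers the retry/backoff loop in _process_data;
        # the returned value is gathered into `results`.
        return {"id": record["id"], "status": row["status"] if row else None}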
re_common/v2/baselibrary/tools/list_tools.py
@@ -1,70 +1,70 @@
- import itertools
- from typing import List, Any, Tuple
-
-
- def check_no_duplicates_2d(lst_2d):
-     """
-     Check whether every row of a 2D list is free of duplicates.
-     Returns False if any row contains a duplicate value,
-     True if no row does.
-     """
-     for row in lst_2d:
-         # convert the row to a set and compare lengths
-         if len(row) != len(set(row)):
-             return False
-     return True
-
-
- def generate_cross_list_combinations(lists: List[List[Any]]) -> List[Tuple[Any, Any]]:
-     """
-     Generate all pairwise combinations across different lists (tuples of length 2).
-
-     Args:
-         lists: a list containing several lists, e.g. [[1,2], ['a','b'], ['x','y']]
-
-     Returns:
-         a list of all cross-list pairwise combinations, each combination a tuple,
-         e.g. [(1,'a'), (1,'b'), (2,'a'), ..., ('a','x'), ('a','y'), ...]
-     """
-     combinations = []
-     for i in range(len(lists)):
-         for j in range(i + 1, len(lists)):
-             combinations.extend(itertools.product(lists[i], lists[j]))
-     return combinations
-
-
- def filter_and_sort_by_smi(all_list, top_n=1000):
-
-     """
-     Each element must be a pair whose first item is the value used for comparison and whose second item is the actual data.
-     """
-
-     # 1. Deduplicate by doc_id, keeping the record with the largest smi
-     unique_dict = {}
-     for smi, doc_id in all_list:
-         if doc_id not in unique_dict or smi > unique_dict[doc_id][0]:
-             unique_dict[doc_id] = (smi, doc_id)
-
-     # 2. Convert to a list and sort
-     unique_list = sorted(unique_dict.values(), key=lambda x: x[0], reverse=True)
-
-     # 3. Take the first top_n entries
-     return unique_list[:top_n]
-
-
- def list_to_dict(list_data, key_name):
-     # use a defaultdict to handle duplicate ids
-     from collections import defaultdict
-
-     dict_data = defaultdict(list)
-
-     for item in list_data:
-         dict_data[item[key_name]].append(item)
-
-     # convert the defaultdict back into a plain dict
-     dict_data = dict(dict_data)
-     return dict_data
-
- def split_list_by_step(lst, step=100):
-     # convert a flat list into a 2D list using the given step size
+ import itertools
+ from typing import List, Any, Tuple
+
+
+ def check_no_duplicates_2d(lst_2d):
+     """
+     Check whether every row of a 2D list is free of duplicates.
+     Returns False if any row contains a duplicate value,
+     True if no row does.
+     """
+     for row in lst_2d:
+         # convert the row to a set and compare lengths
+         if len(row) != len(set(row)):
+             return False
+     return True
+
+
+ def generate_cross_list_combinations(lists: List[List[Any]]) -> List[Tuple[Any, Any]]:
+     """
+     Generate all pairwise combinations across different lists (tuples of length 2).
+
+     Args:
+         lists: a list containing several lists, e.g. [[1,2], ['a','b'], ['x','y']]
+
+     Returns:
+         a list of all cross-list pairwise combinations, each combination a tuple,
+         e.g. [(1,'a'), (1,'b'), (2,'a'), ..., ('a','x'), ('a','y'), ...]
+     """
+     combinations = []
+     for i in range(len(lists)):
+         for j in range(i + 1, len(lists)):
+             combinations.extend(itertools.product(lists[i], lists[j]))
+     return combinations
+
+
+ def filter_and_sort_by_smi(all_list, top_n=1000):
+
+     """
+     Each element must be a pair whose first item is the value used for comparison and whose second item is the actual data.
+     """
+
+     # 1. Deduplicate by doc_id, keeping the record with the largest smi
+     unique_dict = {}
+     for smi, doc_id in all_list:
+         if doc_id not in unique_dict or smi > unique_dict[doc_id][0]:
+             unique_dict[doc_id] = (smi, doc_id)
+
+     # 2. Convert to a list and sort
+     unique_list = sorted(unique_dict.values(), key=lambda x: x[0], reverse=True)
+
+     # 3. Take the first top_n entries
+     return unique_list[:top_n]
+
+
+ def list_to_dict(list_data, key_name):
+     # use a defaultdict to handle duplicate ids
+     from collections import defaultdict
+
+     dict_data = defaultdict(list)
+
+     for item in list_data:
+         dict_data[item[key_name]].append(item)
+
+     # convert the defaultdict back into a plain dict
+     dict_data = dict(dict_data)
+     return dict_data
+
+ def split_list_by_step(lst, step=100):
+     # convert a flat list into a 2D list using the given step size
      return [lst[i:i + step] for i in range(0, len(lst), step)]
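The helpers' behavior follows directly from the definitions above; a brief usage sketch (module path taken from the file list):

    from re_common.v2.baselibrary.tools.list_tools import (
        check_no_duplicates_2d, filter_and_sort_by_smi,
        generate_cross_list_combinations, split_list_by_step,
    )

    check_no_duplicates_2d([[1, 2], [3, 3]])            # False: second row repeats 3
    generate_cross_list_combinations([[1, 2], ["a"]])   # [(1, 'a'), (2, 'a')]
    # (smi, doc_id) pairs: dedupe by doc_id keeping the max smi, sort descending
    filter_and_sort_by_smi([(0.9, "d1"), (0.7, "d1"), (0.8, "d2")], top_n=2)
    # -> [(0.9, 'd1'), (0.8, 'd2')]
    split_list_by_step(list(range(5)), step=2)          # [[0, 1], [2, 3], [4]]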
re_common/v2/baselibrary/tools/resume_tracker.py
@@ -1,94 +1,94 @@
- import logging
- from pathlib import Path
- import sqlite3
- from typing import Iterable, List, Literal, Union
-
- logger = logging.getLogger(__name__)
-
-
- class ResumeTracker:
-     def __init__(
-         self,
-         db_path: Union[str, Path] = "processed.db",
-         timeout: float = 10.0,
-         isolation_level: Union[Literal["DEFERRED", "EXCLUSIVE", "IMMEDIATE"], None] = "DEFERRED",
-     ):
-         self.db_path = Path(db_path)
-         self.timeout = timeout
-         self.isolation_level = isolation_level
-         self.init_db()
-
-     def _get_connection(self) -> sqlite3.Connection:
-         """Create a database connection."""
-         return sqlite3.connect(self.db_path, timeout=self.timeout, isolation_level=self.isolation_level)
-
-     def init_db(self):
-         with self._get_connection() as conn:
-             cursor = conn.cursor()
-             cursor.execute("""
-                 CREATE TABLE IF NOT EXISTS processed_items (
-                     item_key TEXT PRIMARY KEY
-                 )
-             """)
-             conn.commit()
-
-     def is_processed(self, item_key: str) -> bool:
-         with self._get_connection() as conn:
-             cursor = conn.cursor()
-             cursor.execute(
-                 "SELECT 1 FROM processed_items WHERE item_key = ?",
-                 (item_key,),
-             )
-             return cursor.fetchone() is not None
-
-     def mark_processed(self, item_key: str):
-         with self._get_connection() as conn:
-             cursor = conn.cursor()
-             cursor.execute(
-                 "INSERT OR IGNORE INTO processed_items (item_key) VALUES (?)",
-                 (item_key,),
-             )
-             conn.commit()
-
-     def mark_many_processed(self, item_keys: Iterable[str]):
-         with self._get_connection() as conn:
-             cursor = conn.cursor()
-             cursor.executemany(
-                 "INSERT OR IGNORE INTO processed_items (item_key) VALUES (?)",
-                 [(key,) for key in item_keys],
-             )
-             conn.commit()
-
-     def get_processed_count(self) -> int:
-         with self._get_connection() as conn:
-             cursor = conn.cursor()
-             cursor.execute("SELECT COUNT(*) FROM processed_items")
-             return cursor.fetchone()[0]
-
-     def get_processed_items(self) -> List[str]:
-         with self._get_connection() as conn:
-             cursor = conn.cursor()
-             cursor.execute("SELECT item_key FROM processed_items")
-             return [row[0] for row in cursor.fetchall()]
-
-     def clear_processed_items(self):
-         with self._get_connection() as conn:
-             conn.execute("DELETE FROM processed_items")
-         logger.info("Cleared all processed items")
-
-
- if __name__ == "__main__":
-     tracker = ResumeTracker()
-     # test the mark functionality
-     tracker.mark_processed("test_key")
-     print(f"Is 'test_key' processed? {tracker.is_processed('test_key')}")
-
-     # bulk-marking example
-     test_keys = [f"key_{i}" for i in range(1, 10000)]
-     tracker.mark_many_processed(test_keys)
-
-     # show the processed count
-     print(f"Total processed items: {tracker.get_processed_count()}")
-
-     # clean up the test data
-     tracker.clear_processed_items()
+ import logging
+ from pathlib import Path
+ import sqlite3
+ from typing import Iterable, List, Literal, Union
+
+ logger = logging.getLogger(__name__)
+
+
+ class ResumeTracker:
+     def __init__(
+         self,
+         db_path: Union[str, Path] = "processed.db",
+         timeout: float = 10.0,
+         isolation_level: Union[Literal["DEFERRED", "EXCLUSIVE", "IMMEDIATE"], None] = "DEFERRED",
+     ):
+         self.db_path = Path(db_path)
+         self.timeout = timeout
+         self.isolation_level = isolation_level
+         self.init_db()
+
+     def _get_connection(self) -> sqlite3.Connection:
+         """Create a database connection."""
+         return sqlite3.connect(self.db_path, timeout=self.timeout, isolation_level=self.isolation_level)
+
+     def init_db(self):
+         with self._get_connection() as conn:
+             cursor = conn.cursor()
+             cursor.execute("""
+                 CREATE TABLE IF NOT EXISTS processed_items (
+                     item_key TEXT PRIMARY KEY
+                 )
+             """)
+             conn.commit()
+
+     def is_processed(self, item_key: str) -> bool:
+         with self._get_connection() as conn:
+             cursor = conn.cursor()
+             cursor.execute(
+                 "SELECT 1 FROM processed_items WHERE item_key = ?",
+                 (item_key,),
+             )
+             return cursor.fetchone() is not None
+
+     def mark_processed(self, item_key: str):
+         with self._get_connection() as conn:
+             cursor = conn.cursor()
+             cursor.execute(
+                 "INSERT OR IGNORE INTO processed_items (item_key) VALUES (?)",
+                 (item_key,),
+             )
+             conn.commit()
+
+     def mark_many_processed(self, item_keys: Iterable[str]):
+         with self._get_connection() as conn:
+             cursor = conn.cursor()
+             cursor.executemany(
+                 "INSERT OR IGNORE INTO processed_items (item_key) VALUES (?)",
+                 [(key,) for key in item_keys],
+             )
+             conn.commit()
+
+     def get_processed_count(self) -> int:
+         with self._get_connection() as conn:
+             cursor = conn.cursor()
+             cursor.execute("SELECT COUNT(*) FROM processed_items")
+             return cursor.fetchone()[0]
+
+     def get_processed_items(self) -> List[str]:
+         with self._get_connection() as conn:
+             cursor = conn.cursor()
+             cursor.execute("SELECT item_key FROM processed_items")
+             return [row[0] for row in cursor.fetchall()]
+
+     def clear_processed_items(self):
+         with self._get_connection() as conn:
+             conn.execute("DELETE FROM processed_items")
+         logger.info("Cleared all processed items")
+
+
+ if __name__ == "__main__":
+     tracker = ResumeTracker()
+     # test the mark functionality
+     tracker.mark_processed("test_key")
+     print(f"Is 'test_key' processed? {tracker.is_processed('test_key')}")
+
+     # bulk-marking example
+     test_keys = [f"key_{i}" for i in range(1, 10000)]
+     tracker.mark_many_processed(test_keys)
+
+     # show the processed count
+     print(f"Total processed items: {tracker.get_processed_count()}")
+
+     # clean up the test data
+     tracker.clear_processed_items()
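Beyond the self-test above, the intended pattern is to consult the tracker before doing work and record keys afterwards, so that a rerun resumes where the previous run stopped. A sketch under assumed names (load_batches and handle are stand-ins, not re_common APIs):

    tracker = ResumeTracker("processed.db")

    for batch in load_batches():                     # assumed data source yielding key strings
        todo = [key for key in batch if not tracker.is_processed(key)]
        for key in todo:
            handle(key)                              # assumed per-item work
        tracker.mark_many_processed(todo)            # checkpoint the whole batch at once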
re_common/v2/baselibrary/tools/search_hash_tools.py
@@ -1,54 +1,54 @@
- from typing import List
-
- import jieba
- from datasketch import MinHash, minhash
-
- from re_common.v2.baselibrary.decorators.utils import deprecated
- from re_common.v2.baselibrary.utils.string_bool import is_single_cjk_char
-
- @deprecated("Use the methods in TextMatcherV2 instead.")
- def tokenize(text: str, stopwords=None) -> List[str]:
-     """
-     Tokenize and remove stopwords.
-     """
-     if stopwords is None:
-         stopwords = []
-     words = jieba.lcut(text)
-
-     # Count the single-character tokens, to catch words jieba cannot segment and would otherwise split into individual characters
-
-     # A function is used here instead of inlining the condition in the comprehension: in some Spark contexts the if condition of a comprehension does not short-circuit and the expression is treated as a single operator
-     def is_singel_en(i):
-         if len(i) == 1 and not is_single_cjk_char(i):
-             return True
-         return False
-
-     one_char_size = len([i for i in words if is_singel_en(i)])
-     all_size = len(words)
-     # if the share of single characters exceeds a threshold, fall back to whitespace tokenization
-     if all_size != 0 and one_char_size / all_size > 0.6:
-         words = [i for i in text.split() if i.strip()]
-
-     # filter out stopwords and empty strings
-     words = [w for w in words if w not in stopwords and w.strip()]
-     return words
-
- @deprecated("Use the methods in TextMatcherV2 instead.")
- def create_minhash(words: List[str], num_perm=128) -> MinHash:
-     """
-     Create a MinHash from a list of tokens.
-     """
-     minhash = MinHash(num_perm=num_perm)
-     for word in words:
-         minhash.update(word.encode("utf-8"))
-     return minhash
-
- @deprecated("Use the methods in TextMatcherV2 instead.")
- def get_str_minhash(title):
-     from re_common.v2.baselibrary.utils.string_clear import rel_clear
-     rel_title = rel_clear(title)
-     if not rel_title:
-         return ""
-     words = tokenize(rel_title)
-     minhash = create_minhash(words)
-     return minhash
+ from typing import List
+
+ import jieba
+ from datasketch import MinHash, minhash
+
+ from re_common.v2.baselibrary.decorators.utils import deprecated
+ from re_common.v2.baselibrary.utils.string_bool import is_single_cjk_char
+
+ @deprecated("Use the methods in TextMatcherV2 instead.")
+ def tokenize(text: str, stopwords=None) -> List[str]:
+     """
+     Tokenize and remove stopwords.
+     """
+     if stopwords is None:
+         stopwords = []
+     words = jieba.lcut(text)
+
+     # Count the single-character tokens, to catch words jieba cannot segment and would otherwise split into individual characters
+
+     # A function is used here instead of inlining the condition in the comprehension: in some Spark contexts the if condition of a comprehension does not short-circuit and the expression is treated as a single operator
+     def is_singel_en(i):
+         if len(i) == 1 and not is_single_cjk_char(i):
+             return True
+         return False
+
+     one_char_size = len([i for i in words if is_singel_en(i)])
+     all_size = len(words)
+     # if the share of single characters exceeds a threshold, fall back to whitespace tokenization
+     if all_size != 0 and one_char_size / all_size > 0.6:
+         words = [i for i in text.split() if i.strip()]
+
+     # filter out stopwords and empty strings
+     words = [w for w in words if w not in stopwords and w.strip()]
+     return words
+
+ @deprecated("Use the methods in TextMatcherV2 instead.")
+ def create_minhash(words: List[str], num_perm=128) -> MinHash:
+     """
+     Create a MinHash from a list of tokens.
+     """
+     minhash = MinHash(num_perm=num_perm)
+     for word in words:
+         minhash.update(word.encode("utf-8"))
+     return minhash
+
+ @deprecated("Use the methods in TextMatcherV2 instead.")
+ def get_str_minhash(title):
+     from re_common.v2.baselibrary.utils.string_clear import rel_clear
+     rel_title = rel_clear(title)
+     if not rel_title:
+         return ""
+     words = tokenize(rel_title)
+     minhash = create_minhash(words)
+     return minhash
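Taken together, these deprecated helpers estimate title similarity: clean the string, tokenize, build a MinHash, then compare two sketches with datasketch's jaccard method. A short sketch (the sample titles are illustrative; note that get_str_minhash returns an empty string when rel_clear empties the title, hence the guard):

    m1 = get_str_minhash("Deep learning for text matching")
    m2 = get_str_minhash("Text matching with deep learning")
    if m1 and m2:                        # "" means the title was emptied by rel_clear
        print(m1.jaccard(m2))            # estimated Jaccard similarity in [0, 1]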