crawlo-1.1.4-py3-none-any.whl → crawlo-1.1.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (186)
  1. crawlo/__init__.py +61 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +61 -0
  4. crawlo/cleaners/data_formatter.py +226 -0
  5. crawlo/cleaners/encoding_converter.py +126 -0
  6. crawlo/cleaners/text_cleaner.py +233 -0
  7. crawlo/cli.py +40 -40
  8. crawlo/commands/__init__.py +13 -13
  9. crawlo/commands/check.py +594 -594
  10. crawlo/commands/genspider.py +151 -151
  11. crawlo/commands/list.py +155 -155
  12. crawlo/commands/run.py +285 -285
  13. crawlo/commands/startproject.py +300 -196
  14. crawlo/commands/stats.py +188 -188
  15. crawlo/commands/utils.py +186 -186
  16. crawlo/config.py +309 -279
  17. crawlo/config_validator.py +253 -0
  18. crawlo/core/__init__.py +2 -2
  19. crawlo/core/engine.py +346 -172
  20. crawlo/core/processor.py +40 -40
  21. crawlo/core/scheduler.py +137 -166
  22. crawlo/crawler.py +1027 -1027
  23. crawlo/downloader/__init__.py +266 -242
  24. crawlo/downloader/aiohttp_downloader.py +220 -212
  25. crawlo/downloader/cffi_downloader.py +256 -251
  26. crawlo/downloader/httpx_downloader.py +259 -259
  27. crawlo/downloader/hybrid_downloader.py +214 -0
  28. crawlo/downloader/playwright_downloader.py +403 -0
  29. crawlo/downloader/selenium_downloader.py +473 -0
  30. crawlo/event.py +11 -11
  31. crawlo/exceptions.py +81 -81
  32. crawlo/extension/__init__.py +37 -37
  33. crawlo/extension/health_check.py +141 -141
  34. crawlo/extension/log_interval.py +57 -57
  35. crawlo/extension/log_stats.py +81 -81
  36. crawlo/extension/logging_extension.py +43 -43
  37. crawlo/extension/memory_monitor.py +104 -88
  38. crawlo/extension/performance_profiler.py +133 -117
  39. crawlo/extension/request_recorder.py +107 -107
  40. crawlo/filters/__init__.py +154 -154
  41. crawlo/filters/aioredis_filter.py +280 -242
  42. crawlo/filters/memory_filter.py +269 -269
  43. crawlo/items/__init__.py +23 -23
  44. crawlo/items/base.py +21 -21
  45. crawlo/items/fields.py +53 -53
  46. crawlo/items/items.py +104 -104
  47. crawlo/middleware/__init__.py +21 -21
  48. crawlo/middleware/default_header.py +32 -32
  49. crawlo/middleware/download_delay.py +28 -28
  50. crawlo/middleware/middleware_manager.py +135 -135
  51. crawlo/middleware/proxy.py +272 -248
  52. crawlo/middleware/request_ignore.py +30 -30
  53. crawlo/middleware/response_code.py +18 -18
  54. crawlo/middleware/response_filter.py +26 -26
  55. crawlo/middleware/retry.py +124 -124
  56. crawlo/mode_manager.py +206 -201
  57. crawlo/network/__init__.py +21 -21
  58. crawlo/network/request.py +338 -311
  59. crawlo/network/response.py +360 -271
  60. crawlo/pipelines/__init__.py +21 -21
  61. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  62. crawlo/pipelines/console_pipeline.py +39 -39
  63. crawlo/pipelines/csv_pipeline.py +316 -316
  64. crawlo/pipelines/database_dedup_pipeline.py +224 -224
  65. crawlo/pipelines/json_pipeline.py +218 -218
  66. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  67. crawlo/pipelines/mongo_pipeline.py +131 -131
  68. crawlo/pipelines/mysql_pipeline.py +316 -316
  69. crawlo/pipelines/pipeline_manager.py +56 -56
  70. crawlo/pipelines/redis_dedup_pipeline.py +166 -162
  71. crawlo/project.py +153 -153
  72. crawlo/queue/pqueue.py +37 -37
  73. crawlo/queue/queue_manager.py +320 -307
  74. crawlo/queue/redis_priority_queue.py +277 -209
  75. crawlo/settings/__init__.py +7 -7
  76. crawlo/settings/default_settings.py +216 -278
  77. crawlo/settings/setting_manager.py +99 -99
  78. crawlo/spider/__init__.py +639 -639
  79. crawlo/stats_collector.py +59 -59
  80. crawlo/subscriber.py +130 -130
  81. crawlo/task_manager.py +30 -30
  82. crawlo/templates/crawlo.cfg.tmpl +10 -10
  83. crawlo/templates/project/__init__.py.tmpl +3 -3
  84. crawlo/templates/project/items.py.tmpl +17 -17
  85. crawlo/templates/project/middlewares.py.tmpl +110 -110
  86. crawlo/templates/project/pipelines.py.tmpl +97 -97
  87. crawlo/templates/project/run.py.tmpl +251 -251
  88. crawlo/templates/project/settings.py.tmpl +326 -279
  89. crawlo/templates/project/settings_distributed.py.tmpl +120 -0
  90. crawlo/templates/project/settings_gentle.py.tmpl +95 -0
  91. crawlo/templates/project/settings_high_performance.py.tmpl +152 -0
  92. crawlo/templates/project/settings_simple.py.tmpl +69 -0
  93. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  94. crawlo/templates/spider/spider.py.tmpl +141 -141
  95. crawlo/tools/__init__.py +183 -0
  96. crawlo/tools/anti_crawler.py +269 -0
  97. crawlo/tools/authenticated_proxy.py +241 -0
  98. crawlo/tools/data_validator.py +181 -0
  99. crawlo/tools/date_tools.py +36 -0
  100. crawlo/tools/distributed_coordinator.py +387 -0
  101. crawlo/tools/retry_mechanism.py +221 -0
  102. crawlo/tools/scenario_adapter.py +263 -0
  103. crawlo/utils/__init__.py +35 -7
  104. crawlo/utils/batch_processor.py +261 -0
  105. crawlo/utils/controlled_spider_mixin.py +439 -439
  106. crawlo/utils/date_tools.py +290 -233
  107. crawlo/utils/db_helper.py +343 -343
  108. crawlo/utils/enhanced_error_handler.py +360 -0
  109. crawlo/utils/env_config.py +106 -0
  110. crawlo/utils/error_handler.py +126 -0
  111. crawlo/utils/func_tools.py +82 -82
  112. crawlo/utils/large_scale_config.py +286 -286
  113. crawlo/utils/large_scale_helper.py +343 -343
  114. crawlo/utils/log.py +128 -128
  115. crawlo/utils/performance_monitor.py +285 -0
  116. crawlo/utils/queue_helper.py +175 -175
  117. crawlo/utils/redis_connection_pool.py +335 -0
  118. crawlo/utils/redis_key_validator.py +200 -0
  119. crawlo/utils/request.py +267 -267
  120. crawlo/utils/request_serializer.py +219 -219
  121. crawlo/utils/spider_loader.py +62 -62
  122. crawlo/utils/system.py +11 -11
  123. crawlo/utils/tools.py +4 -4
  124. crawlo/utils/url.py +39 -39
  125. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/METADATA +401 -403
  126. crawlo-1.1.5.dist-info/RECORD +185 -0
  127. examples/__init__.py +7 -7
  128. tests/__init__.py +7 -7
  129. tests/advanced_tools_example.py +276 -0
  130. tests/authenticated_proxy_example.py +237 -0
  131. tests/cleaners_example.py +161 -0
  132. tests/config_validation_demo.py +103 -0
  133. {examples → tests}/controlled_spider_example.py +205 -205
  134. tests/date_tools_example.py +181 -0
  135. tests/dynamic_loading_example.py +524 -0
  136. tests/dynamic_loading_test.py +105 -0
  137. tests/env_config_example.py +134 -0
  138. tests/error_handling_example.py +172 -0
  139. tests/redis_key_validation_demo.py +131 -0
  140. tests/response_improvements_example.py +145 -0
  141. tests/test_advanced_tools.py +149 -0
  142. tests/test_all_redis_key_configs.py +146 -0
  143. tests/test_authenticated_proxy.py +142 -0
  144. tests/test_cleaners.py +55 -0
  145. tests/test_comprehensive.py +147 -0
  146. tests/test_config_validator.py +194 -0
  147. tests/test_date_tools.py +124 -0
  148. tests/test_dynamic_downloaders_proxy.py +125 -0
  149. tests/test_dynamic_proxy.py +93 -0
  150. tests/test_dynamic_proxy_config.py +147 -0
  151. tests/test_dynamic_proxy_real.py +110 -0
  152. tests/test_edge_cases.py +304 -0
  153. tests/test_enhanced_error_handler.py +271 -0
  154. tests/test_env_config.py +122 -0
  155. tests/test_error_handler_compatibility.py +113 -0
  156. tests/test_final_validation.py +153 -153
  157. tests/test_framework_env_usage.py +104 -0
  158. tests/test_integration.py +357 -0
  159. tests/test_item_dedup_redis_key.py +123 -0
  160. tests/test_parsel.py +30 -0
  161. tests/test_performance.py +328 -0
  162. tests/test_proxy_health_check.py +32 -32
  163. tests/test_proxy_middleware_integration.py +136 -136
  164. tests/test_proxy_providers.py +56 -56
  165. tests/test_proxy_stats.py +19 -19
  166. tests/test_proxy_strategies.py +59 -59
  167. tests/test_queue_manager_redis_key.py +177 -0
  168. tests/test_redis_config.py +28 -28
  169. tests/test_redis_connection_pool.py +295 -0
  170. tests/test_redis_key_naming.py +182 -0
  171. tests/test_redis_key_validator.py +124 -0
  172. tests/test_redis_queue.py +224 -224
  173. tests/test_request_serialization.py +70 -70
  174. tests/test_response_improvements.py +153 -0
  175. tests/test_scheduler.py +241 -241
  176. tests/test_simple_response.py +62 -0
  177. tests/test_telecom_spider_redis_key.py +206 -0
  178. tests/test_template_content.py +88 -0
  179. tests/test_template_redis_key.py +135 -0
  180. tests/test_tools.py +154 -0
  181. tests/tools_example.py +258 -0
  182. crawlo/core/enhanced_engine.py +0 -190
  183. crawlo-1.1.4.dist-info/RECORD +0 -117
  184. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/WHEEL +0 -0
  185. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/entry_points.txt +0 -0
  186. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/top_level.txt +0 -0
crawlo/tools/date_tools.py
@@ -0,0 +1,36 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ """
+ # @Time   : 2025-09-10 22:00
+ # @Author : crawl-coder
+ # @Desc   : Date tool wrappers
+ """
+
+ # Re-export the date tools from the utils module
+ from ..utils.date_tools import (
+     TimeUtils,
+     parse_time,
+     format_time,
+     time_diff,
+     to_timestamp,
+     to_datetime,
+     now,
+     to_timezone,
+     to_utc,
+     to_local,
+     from_timestamp_with_tz
+ )
+
+ __all__ = [
+     "TimeUtils",
+     "parse_time",
+     "format_time",
+     "time_diff",
+     "to_timestamp",
+     "to_datetime",
+     "now",
+     "to_timezone",
+     "to_utc",
+     "to_local",
+     "from_timestamp_with_tz"
+ ]
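
Usage note: with this shim in place, the date helpers are importable from crawlo.tools.date_tools as well as crawlo.utils.date_tools. A minimal sketch, assuming the helpers behave as their names suggest (only the import path and names are taken from the hunk above; the exact signatures are assumptions):

    # Hedged sketch: import path and names come from the diff; behaviors are assumed.
    from crawlo.tools.date_tools import now, to_timestamp, time_diff

    current = now()                      # assumed to return the current datetime
    print(to_timestamp(current))         # assumed to return epoch seconds
    # assumed to accept two parseable time values and return their difference
    print(time_diff("2025-09-10 22:00:00", "2025-09-11 00:30:00"))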
crawlo/tools/distributed_coordinator.py
@@ -0,0 +1,387 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ """
+ # @Time   : 2025-09-10 22:00
+ # @Author : crawl-coder
+ # @Desc   : Distributed coordination tools
+ """
+
+ import hashlib
+ import time
+ import urllib.parse
+ from datetime import datetime
+ from typing import Dict, Any, Optional, Tuple, List, Set
+ from urllib.parse import urlparse
+
+
+ class TaskDistributor:
+     """Task distribution helper."""
+
+     @staticmethod
+     def generate_pagination_tasks(base_url: str, start_page: int = 1,
+                                   end_page: int = 100, page_param: str = "page") -> List[str]:
+         """
+         Generate a list of paginated task URLs.
+
+         Args:
+             base_url (str): Base URL.
+             start_page (int): First page number.
+             end_page (int): Last page number.
+             page_param (str): Name of the pagination query parameter.
+
+         Returns:
+             List[str]: Paginated URL list.
+         """
+         tasks = []
+         parsed = urlparse(base_url)
+         # parse_qsl/urlencode handle '=' and '&' inside values safely,
+         # which naive string splitting does not.
+         query_dict = dict(urllib.parse.parse_qsl(parsed.query)) if parsed.query else {}
+
+         for page in range(start_page, end_page + 1):
+             query_dict[page_param] = str(page)
+             new_parsed = parsed._replace(query=urllib.parse.urlencode(query_dict))
+             tasks.append(urllib.parse.urlunparse(new_parsed))
+
+         return tasks
+
+     @staticmethod
+     def distribute_tasks(tasks: List[Any], num_workers: int) -> List[List[Any]]:
+         """
+         Distribute tasks across multiple worker nodes.
+
+         Args:
+             tasks (List[Any]): Task list.
+             num_workers (int): Number of worker nodes.
+
+         Returns:
+             List[List[Any]]: One task list per worker.
+         """
+         if num_workers <= 0:
+             raise ValueError("Number of workers must be greater than 0")
+
+         if not tasks:
+             return [[] for _ in range(num_workers)]
+
+         # Base share per worker; the first `remaining_tasks` workers get one extra.
+         tasks_per_worker = len(tasks) // num_workers
+         remaining_tasks = len(tasks) % num_workers
+
+         distributed_tasks = []
+         task_index = 0
+
+         for i in range(num_workers):
+             worker_tasks_count = tasks_per_worker
+             if i < remaining_tasks:
+                 worker_tasks_count += 1
+
+             worker_tasks = tasks[task_index:task_index + worker_tasks_count]
+             distributed_tasks.append(worker_tasks)
+             task_index += worker_tasks_count
+
+         return distributed_tasks
+
+
+ class DeduplicationTool:
+     """Data deduplication helper."""
+
+     def __init__(self):
+         self.memory_set: Set[str] = set()
+         self.bloom_filter = None  # A Bloom filter can be integrated in production
+
+     def generate_fingerprint(self, data: Any) -> str:
+         """
+         Generate a data fingerprint.
+
+         Args:
+             data (Any): The data.
+
+         Returns:
+             str: Data fingerprint (MD5 hash).
+         """
+         if isinstance(data, dict):
+             # Sort dict items so equal dicts produce identical fingerprints.
+             data_str = str(sorted(data.items()))
+         else:
+             data_str = str(data)
+
+         return hashlib.md5(data_str.encode('utf-8')).hexdigest()
+
+     def is_duplicate(self, data: Any) -> bool:
+         """
+         Check whether data is a duplicate (in-memory deduplication).
+
+         Args:
+             data (Any): The data.
+
+         Returns:
+             bool: Whether the data is a duplicate.
+         """
+         fingerprint = self.generate_fingerprint(data)
+         return fingerprint in self.memory_set
+
+     def add_to_dedup(self, data: Any) -> bool:
+         """
+         Add data to the deduplication set.
+
+         Args:
+             data (Any): The data.
+
+         Returns:
+             bool: True if newly added, False if it was already present.
+         """
+         fingerprint = self.generate_fingerprint(data)
+         if fingerprint in self.memory_set:
+             return False
+         else:
+             self.memory_set.add(fingerprint)
+             return True
+
+     async def async_is_duplicate(self, data: Any) -> bool:
+         """Async variant of is_duplicate."""
+         return self.is_duplicate(data)
+
+     async def async_add_to_dedup(self, data: Any) -> bool:
+         """Async variant of add_to_dedup."""
+         return self.add_to_dedup(data)
+
+
+ class DistributedCoordinator:
+     """Distributed coordination helper."""
+
+     def __init__(self, redis_client: Any = None):
+         """
+         Initialize the coordinator.
+
+         Args:
+             redis_client (Any): Redis client; when None, behaviour is simulated in memory.
+         """
+         self.redis_client = redis_client
+         self.task_distributor = TaskDistributor()
+         self.deduplication_tool = DeduplicationTool()
+
+     @staticmethod
+     def generate_task_id(url: str, spider_name: str) -> str:
+         """
+         Generate a task ID.
+
+         Args:
+             url (str): URL.
+             spider_name (str): Spider name.
+
+         Returns:
+             str: Task ID.
+         """
+         # Combine the URL, spider name, and current time into a unique ID.
+         unique_string = f"{url}_{spider_name}_{int(time.time() * 1000)}"
+         return hashlib.md5(unique_string.encode('utf-8')).hexdigest()
+
+     async def claim_task(self, task_id: str, worker_id: str,
+                          timeout: int = 300) -> Tuple[bool, Optional[str]]:
+         """
+         Claim a task (distributed lock).
+
+         Args:
+             task_id (str): Task ID.
+             worker_id (str): Worker node ID.
+             timeout (int): Lock timeout in seconds.
+
+         Returns:
+             Tuple[bool, Optional[str]]: (claimed successfully, error message).
+         """
+         # Without a Redis client, simulate a successful claim in memory.
+         if self.redis_client is None:
+             return True, None
+
+         try:
+             # A real implementation should use Redis SET with the NX and EX options:
+             # result = await self.redis_client.set(f"task_lock:{task_id}", worker_id, nx=True, ex=timeout)
+             # return bool(result), None if result else "Task already claimed by another node"
+             return True, None
+         except Exception as e:
+             return False, str(e)
+
+     async def report_task_status(self, task_id: str, status: str, worker_id: str) -> bool:
+         """
+         Report task status.
+
+         Args:
+             task_id (str): Task ID.
+             status (str): Task status (pending, processing, completed, failed).
+             worker_id (str): Worker node ID.
+
+         Returns:
+             bool: Whether the status was reported successfully.
+         """
+         try:
+             status_info = {
+                 "task_id": task_id,
+                 "status": status,
+                 "worker_id": worker_id,
+                 "timestamp": datetime.now().isoformat()
+             }
+
+             if self.redis_client is None:
+                 # Simulate a successful report.
+                 print(f"Reported task status: {status_info}")
+                 return True
+
+             # A real implementation should persist the status to Redis:
+             # await self.redis_client.hset(f"task_status:{task_id}", mapping=status_info)
+             return True
+         except Exception:
+             return False
+
+     async def get_cluster_info(self) -> Dict[str, Any]:
+         """
+         Get cluster information.
+
+         Returns:
+             Dict[str, Any]: Cluster information.
+         """
+         try:
+             if self.redis_client is None:
+                 # Return mock cluster information.
+                 return {
+                     "worker_count": 3,
+                     "active_workers": ["worker_1", "worker_2", "worker_3"],
+                     "task_queue_size": 100,
+                     "processed_tasks": 500,
+                     "failed_tasks": 5,
+                     "timestamp": datetime.now().isoformat()
+                 }
+
+             # A real implementation should read cluster state from Redis;
+             # mock data is returned here for now.
+             return {
+                 "worker_count": 3,
+                 "active_workers": ["worker_1", "worker_2", "worker_3"],
+                 "task_queue_size": 100,
+                 "processed_tasks": 500,
+                 "failed_tasks": 5,
+                 "timestamp": datetime.now().isoformat()
+             }
+         except Exception as e:
+             return {"error": str(e)}
+
+     def generate_pagination_tasks(self, base_url: str, start_page: int = 1,
+                                   end_page: int = 100, page_param: str = "page") -> List[str]:
+         """Generate a list of paginated task URLs (delegates to TaskDistributor)."""
+         return self.task_distributor.generate_pagination_tasks(base_url, start_page, end_page, page_param)
+
+     def distribute_tasks(self, tasks: List[Any], num_workers: int) -> List[List[Any]]:
+         """Distribute tasks across worker nodes (delegates to TaskDistributor)."""
+         return self.task_distributor.distribute_tasks(tasks, num_workers)
+
+     async def is_duplicate(self, data: Any) -> bool:
+         """
+         Check whether data is a duplicate.
+
+         Args:
+             data (Any): The data.
+
+         Returns:
+             bool: Whether the data is a duplicate.
+         """
+         # With a Redis client, a Bloom filter or a Redis set could be used here.
+         if self.redis_client is not None:
+             # Redis-based deduplication could be implemented here.
+             pass
+
+         # Fall back to in-memory deduplication.
+         return await self.deduplication_tool.async_is_duplicate(data)
+
+     async def add_to_dedup(self, data: Any) -> bool:
+         """
+         Add data to the deduplication set.
+
+         Args:
+             data (Any): The data.
+
+         Returns:
+             bool: Whether the data was newly added.
+         """
+         # With a Redis client, a Bloom filter or a Redis set could be used here.
+         if self.redis_client is not None:
+             # Redis-based deduplication could be implemented here.
+             pass
+
+         # Fall back to in-memory deduplication.
+         return await self.deduplication_tool.async_add_to_dedup(data)
+
+
+ # Convenience functions
+ def generate_task_id(url: str, spider_name: str) -> str:
+     """Generate a task ID."""
+     return DistributedCoordinator.generate_task_id(url, spider_name)
+
+
+ async def claim_task(task_id: str, worker_id: str,
+                      redis_client: Any = None, timeout: int = 300) -> Tuple[bool, Optional[str]]:
+     """Claim a task."""
+     coordinator = DistributedCoordinator(redis_client)
+     return await coordinator.claim_task(task_id, worker_id, timeout)
+
+
+ async def report_task_status(task_id: str, status: str, worker_id: str,
+                              redis_client: Any = None) -> bool:
+     """Report task status."""
+     coordinator = DistributedCoordinator(redis_client)
+     return await coordinator.report_task_status(task_id, status, worker_id)
+
+
+ async def get_cluster_info(redis_client: Any = None) -> Dict[str, Any]:
+     """Get cluster information."""
+     coordinator = DistributedCoordinator(redis_client)
+     return await coordinator.get_cluster_info()
+
+
+ def generate_pagination_tasks(base_url: str, start_page: int = 1,
+                               end_page: int = 100, page_param: str = "page") -> List[str]:
+     """Generate a list of paginated task URLs."""
+     coordinator = DistributedCoordinator()
+     return coordinator.generate_pagination_tasks(base_url, start_page, end_page, page_param)
+
+
+ def distribute_tasks(tasks: List[Any], num_workers: int) -> List[List[Any]]:
+     """Distribute tasks across multiple worker nodes."""
+     coordinator = DistributedCoordinator()
+     return coordinator.distribute_tasks(tasks, num_workers)
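
Together these pieces let one node expand a listing into page URLs, shard them across workers, and skip repeats. A minimal sketch using only the classes defined in the hunk above (the base URL and worker count are illustrative):

    from crawlo.tools.distributed_coordinator import TaskDistributor, DeduplicationTool

    # 100 page URLs sharded across 3 workers: 34/33/33 tasks each.
    urls = TaskDistributor.generate_pagination_tasks(
        "https://example.com/list?sort=new", start_page=1, end_page=100)
    shards = TaskDistributor.distribute_tasks(urls, num_workers=3)
    print([len(s) for s in shards])  # [34, 33, 33]

    # In-memory dedup: the first add returns True, the repeat returns False.
    dedup = DeduplicationTool()
    print(dedup.add_to_dedup({"url": urls[0]}))  # True
    print(dedup.add_to_dedup({"url": urls[0]}))  # False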
crawlo/tools/retry_mechanism.py
@@ -0,0 +1,221 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ """
+ # @Time   : 2025-09-10 22:00
+ # @Author : crawl-coder
+ # @Desc   : Retry mechanism tools
+ """
+
+ import time
+ import random
+ import asyncio
+ from typing import Callable, Any, Optional, Tuple, Set
+ from functools import wraps
+
+
+ class RetryMechanism:
+     """Retry mechanism helper."""
+
+     # HTTP status codes that are retried by default
+     DEFAULT_RETRY_STATUS_CODES = {429, 500, 502, 503, 504}
+
+     # Exception types that are retried by default
+     DEFAULT_RETRY_EXCEPTIONS = (
+         ConnectionError,
+         TimeoutError,
+         asyncio.TimeoutError,
+     )
+
+     def __init__(self, max_retries: int = 3,
+                  retry_status_codes: Optional[Set[int]] = None,
+                  retry_exceptions: Optional[Tuple[type, ...]] = None):
+         """
+         Initialize the retry mechanism.
+
+         Args:
+             max_retries (int): Maximum number of retries.
+             retry_status_codes (Optional[Set[int]]): HTTP status codes to retry on.
+             retry_exceptions (Optional[Tuple[type, ...]]): Exception types to retry on.
+         """
+         self.max_retries = max_retries
+         self.retry_status_codes = retry_status_codes or self.DEFAULT_RETRY_STATUS_CODES
+         self.retry_exceptions = retry_exceptions or self.DEFAULT_RETRY_EXCEPTIONS
+
+     def should_retry(self, status_code: Optional[int] = None,
+                      exception: Optional[Exception] = None) -> bool:
+         """
+         Decide whether a retry is warranted.
+
+         Args:
+             status_code (Optional[int]): HTTP status code.
+             exception (Optional[Exception]): Exception instance.
+
+         Returns:
+             bool: Whether to retry.
+         """
+         # Retry when the status code is in the retry set.
+         if status_code is not None and status_code in self.retry_status_codes:
+             return True
+
+         # Retry when the exception is one of the retryable types.
+         if exception is not None and isinstance(exception, self.retry_exceptions):
+             return True
+
+         return False
+
+     def exponential_backoff(self, attempt: int, base_delay: float = 1.0,
+                             max_delay: float = 60.0) -> float:
+         """
+         Compute the exponential backoff delay.
+
+         Args:
+             attempt (int): Current retry attempt (0-based).
+             base_delay (float): Base delay in seconds.
+             max_delay (float): Maximum delay in seconds.
+
+         Returns:
+             float: Delay in seconds.
+         """
+         # Base delay: base_delay * (2 ** attempt)
+         delay = base_delay * (2 ** attempt)
+
+         # Add random jitter to avoid a thundering herd.
+         jitter = random.uniform(0, 0.1) * delay
+
+         # Cap the final delay at max_delay.
+         return min(delay + jitter, max_delay)
+
+     async def async_retry(self, func: Callable, *args, **kwargs) -> Any:
+         """
+         Execute an async function with retries.
+
+         Args:
+             func (Callable): Function to execute.
+             *args: Positional arguments.
+             **kwargs: Keyword arguments.
+
+         Returns:
+             Any: The function's result.
+
+         Raises:
+             Exception: The last exception, if all retries are exhausted.
+         """
+         last_exception = None
+
+         for attempt in range(self.max_retries + 1):
+             try:
+                 result = await func(*args, **kwargs)
+
+                 # If the result carries a status code, check whether to retry.
+                 if hasattr(result, 'status') and self.should_retry(status_code=result.status):
+                     if attempt < self.max_retries:
+                         delay = self.exponential_backoff(attempt)
+                         await asyncio.sleep(delay)
+                         continue
+                     else:
+                         raise Exception(f"HTTP {result.status} after {self.max_retries} retries")
+
+                 return result
+
+             except Exception as e:
+                 last_exception = e
+
+                 # Retry only for retryable exceptions, while attempts remain.
+                 if self.should_retry(exception=e) and attempt < self.max_retries:
+                     delay = self.exponential_backoff(attempt)
+                     await asyncio.sleep(delay)
+                     continue
+                 else:
+                     raise
+
+         # Unreachable in practice: every iteration returns, continues, or raises.
+         raise last_exception
+
+     def sync_retry(self, func: Callable, *args, **kwargs) -> Any:
+         """
+         Execute a synchronous function with retries.
+
+         Args:
+             func (Callable): Function to execute.
+             *args: Positional arguments.
+             **kwargs: Keyword arguments.
+
+         Returns:
+             Any: The function's result.
+
+         Raises:
+             Exception: The last exception, if all retries are exhausted.
+         """
+         last_exception = None
+
+         for attempt in range(self.max_retries + 1):
+             try:
+                 result = func(*args, **kwargs)
+
+                 # If the result carries a status code, check whether to retry.
+                 if hasattr(result, 'status') and self.should_retry(status_code=result.status):
+                     if attempt < self.max_retries:
+                         delay = self.exponential_backoff(attempt)
+                         time.sleep(delay)
+                         continue
+                     else:
+                         raise Exception(f"HTTP {result.status} after {self.max_retries} retries")
+
+                 return result
+
+             except Exception as e:
+                 last_exception = e
+
+                 # Retry only for retryable exceptions, while attempts remain.
+                 if self.should_retry(exception=e) and attempt < self.max_retries:
+                     delay = self.exponential_backoff(attempt)
+                     time.sleep(delay)
+                     continue
+                 else:
+                     raise
+
+         # Unreachable in practice: every iteration returns, continues, or raises.
+         raise last_exception
+
+
+ def retry(max_retries: int = 3,
+           retry_status_codes: Optional[Set[int]] = None,
+           retry_exceptions: Optional[Tuple[type, ...]] = None):
+     """
+     Retry decorator.
+
+     Args:
+         max_retries (int): Maximum number of retries.
+         retry_status_codes (Optional[Set[int]]): HTTP status codes to retry on.
+         retry_exceptions (Optional[Tuple[type, ...]]): Exception types to retry on.
+     """
+     def decorator(func: Callable) -> Callable:
+         retry_mechanism = RetryMechanism(max_retries, retry_status_codes, retry_exceptions)
+
+         if asyncio.iscoroutinefunction(func):
+             @wraps(func)
+             async def async_wrapper(*args, **kwargs):
+                 return await retry_mechanism.async_retry(func, *args, **kwargs)
+             return async_wrapper
+         else:
+             @wraps(func)
+             def sync_wrapper(*args, **kwargs):
+                 return retry_mechanism.sync_retry(func, *args, **kwargs)
+             return sync_wrapper
+
+     return decorator
+
+
+ # Convenience functions
+ def should_retry(status_code: Optional[int] = None,
+                  exception: Optional[Exception] = None) -> bool:
+     """Decide whether a retry is warranted."""
+     retry_mechanism = RetryMechanism()
+     return retry_mechanism.should_retry(status_code, exception)
+
+
+ def exponential_backoff(attempt: int, base_delay: float = 1.0,
+                         max_delay: float = 60.0) -> float:
+     """Compute the exponential backoff delay."""
+     retry_mechanism = RetryMechanism()
+     return retry_mechanism.exponential_backoff(attempt, base_delay, max_delay)
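
For illustration, a short sketch of the decorator defined above in use; the flaky coroutine and its failure pattern are invented for the example:

    import asyncio
    from crawlo.tools.retry_mechanism import retry, exponential_backoff

    calls = {"n": 0}

    @retry(max_retries=3, retry_exceptions=(ConnectionError,))
    async def flaky_fetch():
        # Fails twice, then succeeds; the decorator absorbs the first two errors,
        # sleeping roughly 1s then 2s (plus jitter) between attempts.
        calls["n"] += 1
        if calls["n"] < 3:
            raise ConnectionError("simulated network drop")
        return "ok"

    print(asyncio.run(flaky_fetch()))  # "ok" after two backoff sleeps
    print(exponential_backoff(2))      # about 4s, plus up to 10% jitter, capped at 60s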