crawlo-1.2.6-py3-none-any.whl → crawlo-1.2.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (209)
  1. crawlo/__init__.py +61 -61
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +60 -60
  4. crawlo/cleaners/data_formatter.py +225 -225
  5. crawlo/cleaners/encoding_converter.py +125 -125
  6. crawlo/cleaners/text_cleaner.py +232 -232
  7. crawlo/cli.py +75 -88
  8. crawlo/commands/__init__.py +14 -14
  9. crawlo/commands/check.py +594 -594
  10. crawlo/commands/genspider.py +151 -151
  11. crawlo/commands/help.py +138 -144
  12. crawlo/commands/list.py +155 -155
  13. crawlo/commands/run.py +323 -323
  14. crawlo/commands/startproject.py +436 -436
  15. crawlo/commands/stats.py +187 -187
  16. crawlo/commands/utils.py +186 -186
  17. crawlo/config.py +312 -312
  18. crawlo/config_validator.py +251 -251
  19. crawlo/core/__init__.py +2 -2
  20. crawlo/core/engine.py +365 -356
  21. crawlo/core/processor.py +40 -40
  22. crawlo/core/scheduler.py +251 -239
  23. crawlo/crawler.py +1099 -1110
  24. crawlo/data/__init__.py +5 -5
  25. crawlo/data/user_agents.py +107 -107
  26. crawlo/downloader/__init__.py +266 -266
  27. crawlo/downloader/aiohttp_downloader.py +228 -221
  28. crawlo/downloader/cffi_downloader.py +256 -256
  29. crawlo/downloader/httpx_downloader.py +259 -259
  30. crawlo/downloader/hybrid_downloader.py +212 -212
  31. crawlo/downloader/playwright_downloader.py +402 -402
  32. crawlo/downloader/selenium_downloader.py +472 -472
  33. crawlo/event.py +11 -11
  34. crawlo/exceptions.py +81 -81
  35. crawlo/extension/__init__.py +39 -38
  36. crawlo/extension/health_check.py +141 -141
  37. crawlo/extension/log_interval.py +57 -57
  38. crawlo/extension/log_stats.py +81 -81
  39. crawlo/extension/logging_extension.py +43 -43
  40. crawlo/extension/memory_monitor.py +104 -104
  41. crawlo/extension/performance_profiler.py +133 -133
  42. crawlo/extension/request_recorder.py +107 -107
  43. crawlo/filters/__init__.py +154 -154
  44. crawlo/filters/aioredis_filter.py +234 -234
  45. crawlo/filters/memory_filter.py +269 -269
  46. crawlo/items/__init__.py +23 -23
  47. crawlo/items/base.py +21 -21
  48. crawlo/items/fields.py +52 -52
  49. crawlo/items/items.py +104 -104
  50. crawlo/middleware/__init__.py +21 -21
  51. crawlo/middleware/default_header.py +131 -131
  52. crawlo/middleware/download_delay.py +104 -104
  53. crawlo/middleware/middleware_manager.py +136 -135
  54. crawlo/middleware/offsite.py +114 -114
  55. crawlo/middleware/proxy.py +367 -367
  56. crawlo/middleware/request_ignore.py +86 -86
  57. crawlo/middleware/response_code.py +163 -163
  58. crawlo/middleware/response_filter.py +136 -136
  59. crawlo/middleware/retry.py +124 -124
  60. crawlo/mode_manager.py +211 -211
  61. crawlo/network/__init__.py +21 -21
  62. crawlo/network/request.py +338 -338
  63. crawlo/network/response.py +359 -359
  64. crawlo/pipelines/__init__.py +21 -21
  65. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  66. crawlo/pipelines/console_pipeline.py +39 -39
  67. crawlo/pipelines/csv_pipeline.py +316 -316
  68. crawlo/pipelines/database_dedup_pipeline.py +222 -222
  69. crawlo/pipelines/json_pipeline.py +218 -218
  70. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  71. crawlo/pipelines/mongo_pipeline.py +131 -131
  72. crawlo/pipelines/mysql_pipeline.py +317 -317
  73. crawlo/pipelines/pipeline_manager.py +62 -61
  74. crawlo/pipelines/redis_dedup_pipeline.py +166 -165
  75. crawlo/project.py +314 -279
  76. crawlo/queue/pqueue.py +37 -37
  77. crawlo/queue/queue_manager.py +377 -376
  78. crawlo/queue/redis_priority_queue.py +306 -306
  79. crawlo/settings/__init__.py +7 -7
  80. crawlo/settings/default_settings.py +219 -215
  81. crawlo/settings/setting_manager.py +122 -122
  82. crawlo/spider/__init__.py +639 -639
  83. crawlo/stats_collector.py +59 -59
  84. crawlo/subscriber.py +129 -129
  85. crawlo/task_manager.py +30 -30
  86. crawlo/templates/crawlo.cfg.tmpl +10 -10
  87. crawlo/templates/project/__init__.py.tmpl +3 -3
  88. crawlo/templates/project/items.py.tmpl +17 -17
  89. crawlo/templates/project/middlewares.py.tmpl +118 -118
  90. crawlo/templates/project/pipelines.py.tmpl +96 -96
  91. crawlo/templates/project/settings.py.tmpl +288 -288
  92. crawlo/templates/project/settings_distributed.py.tmpl +157 -157
  93. crawlo/templates/project/settings_gentle.py.tmpl +100 -100
  94. crawlo/templates/project/settings_high_performance.py.tmpl +134 -134
  95. crawlo/templates/project/settings_simple.py.tmpl +98 -98
  96. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  97. crawlo/templates/run.py.tmpl +47 -45
  98. crawlo/templates/spider/spider.py.tmpl +143 -143
  99. crawlo/tools/__init__.py +182 -182
  100. crawlo/tools/anti_crawler.py +268 -268
  101. crawlo/tools/authenticated_proxy.py +240 -240
  102. crawlo/tools/data_validator.py +180 -180
  103. crawlo/tools/date_tools.py +35 -35
  104. crawlo/tools/distributed_coordinator.py +386 -386
  105. crawlo/tools/retry_mechanism.py +220 -220
  106. crawlo/tools/scenario_adapter.py +262 -262
  107. crawlo/utils/__init__.py +35 -35
  108. crawlo/utils/batch_processor.py +259 -259
  109. crawlo/utils/controlled_spider_mixin.py +439 -439
  110. crawlo/utils/date_tools.py +290 -290
  111. crawlo/utils/db_helper.py +343 -343
  112. crawlo/utils/enhanced_error_handler.py +356 -356
  113. crawlo/utils/env_config.py +143 -106
  114. crawlo/utils/error_handler.py +123 -123
  115. crawlo/utils/func_tools.py +82 -82
  116. crawlo/utils/large_scale_config.py +286 -286
  117. crawlo/utils/large_scale_helper.py +344 -344
  118. crawlo/utils/log.py +128 -128
  119. crawlo/utils/performance_monitor.py +285 -285
  120. crawlo/utils/queue_helper.py +175 -175
  121. crawlo/utils/redis_connection_pool.py +351 -351
  122. crawlo/utils/redis_key_validator.py +198 -198
  123. crawlo/utils/request.py +267 -267
  124. crawlo/utils/request_serializer.py +218 -218
  125. crawlo/utils/spider_loader.py +61 -61
  126. crawlo/utils/system.py +11 -11
  127. crawlo/utils/tools.py +4 -4
  128. crawlo/utils/url.py +39 -39
  129. {crawlo-1.2.6.dist-info → crawlo-1.2.8.dist-info}/METADATA +764 -764
  130. crawlo-1.2.8.dist-info/RECORD +209 -0
  131. examples/__init__.py +7 -7
  132. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +81 -81
  133. tests/__init__.py +7 -7
  134. tests/advanced_tools_example.py +275 -275
  135. tests/authenticated_proxy_example.py +236 -236
  136. tests/cleaners_example.py +160 -160
  137. tests/config_validation_demo.py +102 -102
  138. tests/controlled_spider_example.py +205 -205
  139. tests/date_tools_example.py +180 -180
  140. tests/dynamic_loading_example.py +523 -523
  141. tests/dynamic_loading_test.py +104 -104
  142. tests/env_config_example.py +133 -133
  143. tests/error_handling_example.py +171 -171
  144. tests/redis_key_validation_demo.py +130 -130
  145. tests/response_improvements_example.py +144 -144
  146. tests/test_advanced_tools.py +148 -148
  147. tests/test_all_redis_key_configs.py +145 -145
  148. tests/test_authenticated_proxy.py +141 -141
  149. tests/test_cleaners.py +54 -54
  150. tests/test_comprehensive.py +146 -146
  151. tests/test_config_consistency.py +81 -0
  152. tests/test_config_validator.py +193 -193
  153. tests/test_crawlo_proxy_integration.py +172 -172
  154. tests/test_date_tools.py +123 -123
  155. tests/test_default_header_middleware.py +158 -158
  156. tests/test_double_crawlo_fix.py +207 -207
  157. tests/test_double_crawlo_fix_simple.py +124 -124
  158. tests/test_download_delay_middleware.py +221 -221
  159. tests/test_downloader_proxy_compatibility.py +268 -268
  160. tests/test_dynamic_downloaders_proxy.py +124 -124
  161. tests/test_dynamic_proxy.py +92 -92
  162. tests/test_dynamic_proxy_config.py +146 -146
  163. tests/test_dynamic_proxy_real.py +109 -109
  164. tests/test_edge_cases.py +303 -303
  165. tests/test_enhanced_error_handler.py +270 -270
  166. tests/test_env_config.py +121 -121
  167. tests/test_error_handler_compatibility.py +112 -112
  168. tests/test_final_validation.py +153 -153
  169. tests/test_framework_env_usage.py +103 -103
  170. tests/test_integration.py +356 -356
  171. tests/test_item_dedup_redis_key.py +122 -122
  172. tests/test_mode_consistency.py +52 -0
  173. tests/test_offsite_middleware.py +221 -221
  174. tests/test_parsel.py +29 -29
  175. tests/test_performance.py +327 -327
  176. tests/test_proxy_api.py +264 -264
  177. tests/test_proxy_health_check.py +32 -32
  178. tests/test_proxy_middleware.py +121 -121
  179. tests/test_proxy_middleware_enhanced.py +216 -216
  180. tests/test_proxy_middleware_integration.py +136 -136
  181. tests/test_proxy_providers.py +56 -56
  182. tests/test_proxy_stats.py +19 -19
  183. tests/test_proxy_strategies.py +59 -59
  184. tests/test_queue_manager_double_crawlo.py +173 -173
  185. tests/test_queue_manager_redis_key.py +176 -176
  186. tests/test_real_scenario_proxy.py +195 -195
  187. tests/test_redis_config.py +28 -28
  188. tests/test_redis_connection_pool.py +294 -294
  189. tests/test_redis_key_naming.py +181 -181
  190. tests/test_redis_key_validator.py +123 -123
  191. tests/test_redis_queue.py +224 -224
  192. tests/test_request_ignore_middleware.py +182 -182
  193. tests/test_request_serialization.py +70 -70
  194. tests/test_response_code_middleware.py +349 -349
  195. tests/test_response_filter_middleware.py +427 -427
  196. tests/test_response_improvements.py +152 -152
  197. tests/test_retry_middleware.py +241 -241
  198. tests/test_scheduler.py +252 -241
  199. tests/test_scheduler_config_update.py +134 -0
  200. tests/test_simple_response.py +61 -61
  201. tests/test_telecom_spider_redis_key.py +205 -205
  202. tests/test_template_content.py +87 -87
  203. tests/test_template_redis_key.py +134 -134
  204. tests/test_tools.py +153 -153
  205. tests/tools_example.py +257 -257
  206. crawlo-1.2.6.dist-info/RECORD +0 -206
  207. {crawlo-1.2.6.dist-info → crawlo-1.2.8.dist-info}/WHEEL +0 -0
  208. {crawlo-1.2.6.dist-info → crawlo-1.2.8.dist-info}/entry_points.txt +0 -0
  209. {crawlo-1.2.6.dist-info → crawlo-1.2.8.dist-info}/top_level.txt +0 -0
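
The largest file in this diff is crawlo/crawler.py (shown below), whose module docstring documents the public entry points: a Crawler wraps a single spider run, and a CrawlerProcess schedules several spiders concurrently with a concurrency cap. For orientation, here is a minimal sketch of that documented usage. MySpider and myproject.spiders are hypothetical (not part of this package diff); the call pattern simply follows the docstring examples and return values visible in the diffed module, not a verified end-to-end setup.

```python
# Minimal sketch of the CrawlerProcess usage documented in crawlo/crawler.py below.
import asyncio

from crawlo.crawler import CrawlerProcess
# Hypothetical Spider subclass; substitute your own project's spider.
from myproject.spiders import MySpider


async def main() -> None:
    # max_concurrency caps how many spiders run in parallel (the docstring example uses 8).
    process = CrawlerProcess(max_concurrency=8)

    # crawl() accepts a Spider class, a registered spider name, or a list of either;
    # per the diffed code it returns a summary dict (total / successful / failed / success_rate).
    results = await process.crawl([MySpider])
    print(f"{results['successful']}/{results['total']} spiders finished")


if __name__ == "__main__":
    asyncio.run(main())
```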
crawlo/crawler.py CHANGED
@@ -1,1111 +1,1100 @@
1
- #!/usr/bin/python
2
- # -*- coding: UTF-8 -*-
3
- """
4
- Crawlo Crawler Module
5
- ====================
6
- 提供爬虫进程管理和运行时核心功能。
7
-
8
- 核心组件:
9
- - Crawler: 单个爬虫运行实例,管理Spider与引擎的生命周期
10
- - CrawlerProcess: 爬虫进程管理器,支持多爬虫并发调度和资源管理
11
-
12
- 功能特性:
13
- - 智能并发控制和资源管理
14
- - 优雅关闭和信号处理
15
- - 统计监控和性能追踪
16
- - 自动模块发现和注册
17
- - 错误恢复和重试机制
18
- - 大规模爬虫优化支持
19
-
20
- 示例用法:
21
- # 单个爬虫运行
22
- crawler = Crawler(MySpider, settings)
23
- await crawler.crawl()
24
-
25
- # 多爬虫并发管理
26
- process = CrawlerProcess()
27
- await process.crawl([Spider1, Spider2])
28
- """
29
- from __future__ import annotations
30
- import asyncio
31
- import signal
32
- import time
33
- import threading
34
- from typing import Type, Optional, Set, List, Union, Dict, Any
35
- from .spider import Spider, get_global_spider_registry
36
- from .core.engine import Engine
37
- from .utils.log import get_logger
38
- from .subscriber import Subscriber
39
- from .extension import ExtensionManager
40
- from .stats_collector import StatsCollector
41
- from .event import spider_opened, spider_closed
42
- from .settings.setting_manager import SettingManager
43
- from crawlo.project import merge_settings, get_settings
44
-
45
-
46
- logger = get_logger(__name__)
47
-
48
-
49
- class CrawlerContext:
50
- """
51
- 爬虫上下文管理器
52
- 提供共享状态和资源管理
53
- """
54
-
55
- def __init__(self):
56
- self.start_time = time.time()
57
- self.total_crawlers = 0
58
- self.active_crawlers = 0
59
- self.completed_crawlers = 0
60
- self.failed_crawlers = 0
61
- self.error_log = []
62
- self._lock = threading.RLock()
63
-
64
- def increment_total(self):
65
- with self._lock:
66
- self.total_crawlers += 1
67
-
68
- def increment_active(self):
69
- with self._lock:
70
- self.active_crawlers += 1
71
-
72
- def decrement_active(self):
73
- with self._lock:
74
- self.active_crawlers -= 1
75
-
76
- def increment_completed(self):
77
- with self._lock:
78
- self.completed_crawlers += 1
79
-
80
- def increment_failed(self, error: str):
81
- with self._lock:
82
- self.failed_crawlers += 1
83
- self.error_log.append({
84
- 'timestamp': time.time(),
85
- 'error': error
86
- })
87
-
88
- def get_stats(self) -> Dict[str, Any]:
89
- with self._lock:
90
- duration = time.time() - self.start_time
91
- return {
92
- 'total_crawlers': self.total_crawlers,
93
- 'active_crawlers': self.active_crawlers,
94
- 'completed_crawlers': self.completed_crawlers,
95
- 'failed_crawlers': self.failed_crawlers,
96
- 'success_rate': (self.completed_crawlers / max(1, self.total_crawlers)) * 100,
97
- 'duration_seconds': round(duration, 2),
98
- 'error_count': len(self.error_log)
99
- }
100
-
101
-
102
- class Crawler:
103
- """
104
- 单个爬虫运行实例,管理 Spider 与引擎的生命周期
105
-
106
- 提供功能:
107
- - Spider 生命周期管理(初始化、运行、关闭)
108
- - 引擎组件的协调管理
109
- - 配置合并和验证
110
- - 统计数据收集
111
- - 扩展管理
112
- - 异常处理和清理
113
- """
114
-
115
- def __init__(self, spider_cls: Type[Spider], settings: SettingManager, context: Optional[CrawlerContext] = None):
116
- self.spider_cls = spider_cls
117
- self.spider: Optional[Spider] = None
118
- self.engine: Optional[Engine] = None
119
- self.stats: Optional[StatsCollector] = None
120
- self.subscriber: Optional[Subscriber] = None
121
- self.extension: Optional[ExtensionManager] = None
122
- self.settings: SettingManager = settings.copy()
123
- self.context = context or CrawlerContext()
124
-
125
- # 状态管理
126
- self._closed = False
127
- self._close_lock = asyncio.Lock()
128
- self._start_time = None
129
- self._end_time = None
130
-
131
- # 性能监控
132
- self._performance_metrics = {
133
- 'initialization_time': 0,
134
- 'crawl_duration': 0,
135
- 'memory_peak': 0,
136
- 'request_count': 0,
137
- 'error_count': 0
138
- }
139
-
140
- async def crawl(self):
141
- """
142
- 启动爬虫核心流程
143
-
144
- 包含以下阶段:
145
- 1. 初始化阶段: 创建所有组件
146
- 2. 验证阶段: 检查配置和状态
147
- 3. 运行阶段: 启动爬虫引擎
148
- 4. 清理阶段: 资源释放
149
- """
150
- init_start = time.time()
151
- self._start_time = init_start
152
-
153
- try:
154
- # 更新上下文状态
155
- self.context.increment_active()
156
-
157
- # 阶段 1: 初始化组件
158
- self.subscriber = self._create_subscriber()
159
- self.spider = self._create_spider()
160
- self.engine = self._create_engine()
161
- self.stats = self._create_stats()
162
- self.extension = self._create_extension()
163
-
164
- # 记录初始化时间
165
- self._performance_metrics['initialization_time'] = time.time() - init_start
166
-
167
- # 阶段 2: 验证状态
168
- self._validate_crawler_state()
169
-
170
- # 阶段 3: 启动爬虫
171
- crawl_start = time.time()
172
- await self.engine.start_spider(self.spider)
173
-
174
- # 记录爬取时间
175
- self._performance_metrics['crawl_duration'] = time.time() - crawl_start
176
- self._end_time = time.time()
177
-
178
- # 更新上下文状态
179
- self.context.increment_completed()
180
-
181
- logger.info(f"爬虫 {self.spider.name} 完成,耗时 {self._get_total_duration():.2f}秒")
182
-
183
- except Exception as e:
184
- self._performance_metrics['error_count'] += 1
185
- self.context.increment_failed(str(e))
186
- logger.error(f"爬虫 {getattr(self.spider, 'name', 'Unknown')} 运行失败: {e}", exc_info=True)
187
- raise
188
- finally:
189
- self.context.decrement_active()
190
- # 确保资源清理
191
- await self._ensure_cleanup()
192
-
193
- def _validate_crawler_state(self):
194
- """
195
- 验证爬虫状态和配置
196
- 确保所有必要组件都已正确初始化
197
- """
198
- if not self.spider:
199
- raise RuntimeError("爬虫实例未初始化")
200
- if not self.engine:
201
- raise RuntimeError("引擎未初始化")
202
- if not self.stats:
203
- raise RuntimeError("统计收集器未初始化")
204
- if not self.subscriber:
205
- raise RuntimeError("事件订阅器未初始化")
206
-
207
- # 检查关键配置
208
- if not self.spider.name:
209
- raise ValueError("爬虫名称不能为空")
210
-
211
- logger.debug(f"爬虫 {self.spider.name} 状态验证通过")
212
-
213
- def _get_total_duration(self) -> float:
214
- """获取总运行时间"""
215
- if self._start_time and self._end_time:
216
- return self._end_time - self._start_time
217
- return 0.0
218
-
219
- async def _ensure_cleanup(self):
220
- """确保资源清理"""
221
- try:
222
- if not self._closed:
223
- await self.close()
224
- except Exception as e:
225
- logger.warning(f"清理资源时发生错误: {e}")
226
-
227
- def get_performance_metrics(self) -> Dict[str, Any]:
228
- """获取性能指标"""
229
- metrics = self._performance_metrics.copy()
230
- metrics['total_duration'] = self._get_total_duration()
231
- if self.stats:
232
- # 添加统计数据
233
- stats_data = getattr(self.stats, 'get_stats', lambda: {})()
234
- metrics.update(stats_data)
235
- return metrics
236
- @staticmethod
237
- def _create_subscriber() -> Subscriber:
238
- """创建事件订阅器"""
239
- return Subscriber()
240
-
241
- def _create_spider(self) -> Spider:
242
- """
243
- 创建并验证爬虫实例(增强版)
244
-
245
- 执行以下验证:
246
- - 爬虫名称必须存在
247
- - start_requests 方法必须可调用
248
- - start_urls 不能是字符串
249
- - parse 方法建议存在
250
- """
251
- spider = self.spider_cls.create_instance(self)
252
-
253
- # 必要属性检查
254
- if not getattr(spider, 'name', None):
255
- raise AttributeError(
256
- f"爬虫类 '{self.spider_cls.__name__}' 必须定义 'name' 属性。\n"
257
- f"示例: name = 'my_spider'"
258
- )
259
-
260
- if not callable(getattr(spider, 'start_requests', None)):
261
- raise AttributeError(
262
- f"爬虫 '{spider.name}' 必须实现可调用的 'start_requests' 方法。\n"
263
- f"示例: def start_requests(self): yield Request(url='...')"
264
- )
265
-
266
- # start_urls 类型检查
267
- start_urls = getattr(spider, 'start_urls', [])
268
- if isinstance(start_urls, str):
269
- raise TypeError(
270
- f"爬虫 '{spider.name}' 的 'start_urls' 必须是列表或元组,不能是字符串。\n"
271
- f"正确写法: start_urls = ['http://example.com']\n"
272
- f"错误写法: start_urls = 'http://example.com'"
273
- )
274
-
275
- # parse 方法检查(警告而非错误)
276
- if not callable(getattr(spider, 'parse', None)):
277
- logger.warning(
278
- f"爬虫 '{spider.name}' 未定义 'parse' 方法。\n"
279
- f"请确保所有 Request 都指定了回调函数,否则响应将被忽略。"
280
- )
281
-
282
- # 设置爬虫配置
283
- self._set_spider(spider)
284
-
285
- logger.debug(f"爬虫 '{spider.name}' 初始化完成")
286
- return spider
287
-
288
- def _create_engine(self) -> Engine:
289
- """创建并初始化引擎"""
290
- engine = Engine(self)
291
- engine.engine_start()
292
- logger.debug(f"引擎初始化完成,爬虫: {getattr(self.spider, 'name', 'Unknown')}")
293
- return engine
294
-
295
- def _create_stats(self) -> StatsCollector:
296
- """创建统计收集器"""
297
- stats = StatsCollector(self)
298
- logger.debug(f"统计收集器初始化完成,爬虫: {getattr(self.spider, 'name', 'Unknown')}")
299
- return stats
300
-
301
- def _create_extension(self) -> ExtensionManager:
302
- """创建扩展管理器"""
303
- extension = ExtensionManager.create_instance(self)
304
- logger.debug(f"扩展管理器初始化完成,爬虫: {getattr(self.spider, 'name', 'Unknown')}")
305
- return extension
306
-
307
- def _set_spider(self, spider: Spider):
308
- """
309
- 设置爬虫配置和事件订阅
310
- 将爬虫的生命周期事件与订阅器绑定
311
- """
312
- # 订阅爬虫生命周期事件
313
- self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
314
- self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
315
-
316
- # 合并爬虫自定义配置
317
- merge_settings(spider, self.settings)
318
-
319
- logger.debug(f"爬虫 '{spider.name}' 配置合并完成")
320
-
321
- async def close(self, reason='finished') -> None:
322
- """
323
- 关闭爬虫并清理资源(增强版)
324
-
325
- 确保只关闭一次,并处理所有清理操作
326
- """
327
- async with self._close_lock:
328
- if self._closed:
329
- return
330
-
331
- self._closed = True
332
- self._end_time = time.time()
333
-
334
- try:
335
- # 通知爬虫关闭事件
336
- if self.subscriber:
337
- await self.subscriber.notify(spider_closed)
338
-
339
- # 统计数据收集
340
- if self.stats and self.spider:
341
- self.stats.close_spider(spider=self.spider, reason=reason)
342
- # 记录统计数据
343
- try:
344
- from crawlo.commands.stats import record_stats
345
- record_stats(self)
346
- except ImportError:
347
- logger.debug("统计记录模块不存在,跳过统计记录")
348
-
349
- logger.info(
350
- f"爬虫 '{getattr(self.spider, 'name', 'Unknown')}' 已关闭,"
351
- f"原因: {reason},耗时: {self._get_total_duration():.2f}秒"
352
- )
353
-
354
- except Exception as e:
355
- logger.error(f"关闭爬虫时发生错误: {e}", exc_info=True)
356
- finally:
357
- # 确保资源清理
358
- await self._cleanup_resources()
359
-
360
- async def _cleanup_resources(self):
361
- """清理所有资源"""
362
- cleanup_tasks = []
363
-
364
- # 引擎清理
365
- if self.engine:
366
- try:
367
- cleanup_tasks.append(self.engine.close())
368
- except AttributeError:
369
- pass # 引擎没有close方法
370
-
371
- # 扩展清理
372
- if self.extension:
373
- try:
374
- cleanup_tasks.append(self.extension.close())
375
- except AttributeError:
376
- pass
377
-
378
- # 统计收集器清理
379
- if self.stats:
380
- try:
381
- cleanup_tasks.append(self.stats.close())
382
- except AttributeError:
383
- pass
384
-
385
- # 并发执行清理任务
386
- if cleanup_tasks:
387
- await asyncio.gather(*cleanup_tasks, return_exceptions=True)
388
-
389
- logger.debug("资源清理完成")
390
-
391
-
392
- class CrawlerProcess:
393
- """
394
- 爬虫进程管理器
395
-
396
- 支持功能:
397
- - 多爬虫并发调度和资源管理
398
- - 自动模块发现和爬虫注册
399
- - 智能并发控制和负载均衡
400
- - 优雅关闭和信号处理
401
- - 实时状态监控和统计
402
- - 错误恢复和重试机制
403
- - 大规模爬虫优化支持
404
-
405
- 使用示例:
406
- # 基本用法
407
- process = CrawlerProcess()
408
- await process.crawl(MySpider)
409
-
410
- # 多爬虫并发
411
- await process.crawl([Spider1, Spider2, 'spider_name'])
412
-
413
- # 自定义并发数
414
- process = CrawlerProcess(max_concurrency=8)
415
- """
416
-
417
- def __init__(
418
- self,
419
- settings: Optional[SettingManager] = None,
420
- max_concurrency: Optional[int] = None,
421
- spider_modules: Optional[List[str]] = None,
422
- enable_monitoring: bool = True
423
- ):
424
- # 基础配置
425
- self.settings: SettingManager = settings or self._get_default_settings()
426
- self.crawlers: Set[Crawler] = set()
427
- self._active_tasks: Set[asyncio.Task] = set()
428
-
429
- # 上下文管理器
430
- self.context = CrawlerContext()
431
-
432
- # 并发控制配置
433
- self.max_concurrency: int = (
434
- max_concurrency
435
- or self.settings.get('MAX_RUNNING_SPIDERS')
436
- or self.settings.get('CONCURRENCY', 3)
437
- )
438
- self.semaphore = asyncio.Semaphore(self.max_concurrency)
439
-
440
- # 监控配置
441
- self.enable_monitoring = enable_monitoring
442
- self._monitoring_task = None
443
- self._shutdown_event = asyncio.Event()
444
-
445
- # 自动发现并导入爬虫模块
446
- if spider_modules:
447
- self.auto_discover(spider_modules)
448
-
449
- # 使用全局注册表的快照(避免后续导入影响)
450
- self._spider_registry: Dict[str, Type[Spider]] = get_global_spider_registry()
451
-
452
- # 性能监控
453
- self._performance_stats = {
454
- 'total_requests': 0,
455
- 'successful_requests': 0,
456
- 'failed_requests': 0,
457
- 'memory_usage_mb': 0,
458
- 'cpu_usage_percent': 0
459
- }
460
-
461
- # 注册信号量
462
- signal.signal(signal.SIGINT, self._shutdown)
463
- signal.signal(signal.SIGTERM, self._shutdown)
464
-
465
- self._log_startup_info()
466
-
467
- logger.debug(
468
- f"CrawlerProcess 初始化完成\n"
469
- f" - 最大并行爬虫数: {self.max_concurrency}\n"
470
- f" - 已注册爬虫数: {len(self._spider_registry)}\n"
471
- f" - 监控启用: {self.enable_monitoring}"
472
- )
473
-
474
- async def start_monitoring(self):
475
- """启动监控任务"""
476
- if not self.enable_monitoring:
477
- return
478
-
479
- self._monitoring_task = asyncio.create_task(self._monitor_loop())
480
- logger.debug("监控任务已启动")
481
-
482
- async def stop_monitoring(self):
483
- """停止监控任务"""
484
- if self._monitoring_task and not self._monitoring_task.done():
485
- self._monitoring_task.cancel()
486
- try:
487
- await self._monitoring_task
488
- except asyncio.CancelledError:
489
- pass
490
- logger.debug("监控任务已停止")
491
-
492
- async def _monitor_loop(self):
493
- """监控循环,定期收集和报告状态"""
494
- try:
495
- while not self._shutdown_event.is_set():
496
- await self._collect_performance_stats()
497
-
498
- # 每30秒输出一次状态
499
- stats = self.context.get_stats()
500
- if stats['active_crawlers'] > 0:
501
- logger.debug(
502
- f"爬虫状态: 活跃 {stats['active_crawlers']}, "
503
- f"完成 {stats['completed_crawlers']}, "
504
- f"失败 {stats['failed_crawlers']}, "
505
- f"成功率 {stats['success_rate']:.1f}%"
506
- )
507
-
508
- await asyncio.sleep(30) # 30秒间隔
509
-
510
- except asyncio.CancelledError:
511
- logger.debug("监控循环被取消")
512
- except Exception as e:
513
- logger.error(f"监控循环错误: {e}", exc_info=True)
514
-
515
- async def _collect_performance_stats(self):
516
- """收集性能统计数据"""
517
- try:
518
- import psutil
519
- import os
520
-
521
- process = psutil.Process(os.getpid())
522
- memory_info = process.memory_info()
523
-
524
- self._performance_stats.update({
525
- 'memory_usage_mb': round(memory_info.rss / 1024 / 1024, 2),
526
- 'cpu_usage_percent': round(process.cpu_percent(), 2)
527
- })
528
-
529
- except ImportError:
530
- # psutil 不存在时跳过性能监控
531
- pass
532
- except Exception as e:
533
- logger.debug(f"收集性能统计失败: {e}")
534
- @staticmethod
535
- def auto_discover(modules: List[str]):
536
- """
537
- 自动导入模块,触发 Spider 类定义和注册(增强版)
538
-
539
- 支持递归扫描和错误恢复
540
- """
541
- import importlib
542
- import pkgutil
543
-
544
- discovered_count = 0
545
- error_count = 0
546
-
547
- for module_name in modules:
548
- try:
549
- module = importlib.import_module(module_name)
550
-
551
- if hasattr(module, '__path__'):
552
- # 包模块,递归扫描
553
- for _, name, _ in pkgutil.walk_packages(module.__path__, module.__name__ + "."):
554
- try:
555
- importlib.import_module(name)
556
- discovered_count += 1
557
- except Exception as sub_e:
558
- error_count += 1
559
- logger.warning(f"导入子模块 {name} 失败: {sub_e}")
560
- else:
561
- # 单个模块
562
- importlib.import_module(module_name)
563
- discovered_count += 1
564
-
565
- logger.debug(f"已扫描模块: {module_name}")
566
-
567
- except Exception as e:
568
- error_count += 1
569
- logger.error(f"扫描模块 {module_name} 失败: {e}", exc_info=True)
570
-
571
- logger.debug(
572
- f"爬虫注册完成: 成功 {discovered_count} 个,失败 {error_count} 个"
573
- )
574
-
575
- # === 公共只读接口:避免直接访问 _spider_registry ===
576
-
577
- def get_spider_names(self) -> List[str]:
578
- """获取所有已注册的爬虫名称"""
579
- return list(self._spider_registry.keys())
580
-
581
- def get_spider_class(self, name: str) -> Optional[Type[Spider]]:
582
- """根据 name 获取爬虫类"""
583
- return self._spider_registry.get(name)
584
-
585
- def is_spider_registered(self, name: str) -> bool:
586
- """检查某个 name 是否已注册"""
587
- return name in self._spider_registry
588
-
589
- async def crawl(self, spiders: Union[Type[Spider], str, List[Union[Type[Spider], str]]]):
590
- """
591
- 启动一个或多个爬虫
592
-
593
- 增强功能:
594
- - 智能并发控制
595
- - 实时监控和统计
596
- - 错误恢复和重试
597
- - 优雅关闭处理
598
- """
599
- # 阶段 1: 预处理和验证
600
- spider_classes_to_run = self._resolve_spiders_to_run(spiders)
601
- total = len(spider_classes_to_run)
602
-
603
- if total == 0:
604
- raise ValueError("至少需要提供一个爬虫类或名称")
605
-
606
- # 阶段 2: 初始化上下文和监控
607
- for _ in range(total):
608
- self.context.increment_total()
609
-
610
- # 启动监控任务
611
- await self.start_monitoring()
612
-
613
- try:
614
- # 阶段 3: 按类名排序,保证启动顺序可预测
615
- spider_classes_to_run.sort(key=lambda cls: cls.__name__.lower())
616
-
617
- logger.debug(
618
- f"开始启动 {total} 个爬虫\n"
619
- f" - 最大并发数: {self.max_concurrency}\n"
620
- f" - 爬虫列表: {[cls.__name__ for cls in spider_classes_to_run]}"
621
- )
622
-
623
- # 阶段 4: 流式启动所有爬虫任务
624
- tasks = [
625
- asyncio.create_task(
626
- self._run_spider_with_limit(spider_cls, index + 1, total),
627
- name=f"spider-{spider_cls.__name__}-{index+1}"
628
- )
629
- for index, spider_cls in enumerate(spider_classes_to_run)
630
- ]
631
-
632
- # 阶段 5: 等待所有任务完成(失败不中断)
633
- results = await asyncio.gather(*tasks, return_exceptions=True)
634
-
635
- # 阶段 6: 统计异常和结果
636
- failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
637
- successful = total - len(failed)
638
-
639
- if failed:
640
- failed_spiders = [spider_classes_to_run[i].__name__ for i in failed]
641
- logger.error(
642
- f"爬虫执行结果: 成功 {successful}/{total},失败 {len(failed)}/{total}\n"
643
- f" - 失败爬虫: {failed_spiders}"
644
- )
645
-
646
- # 记录详细错误信息
647
- for i in failed:
648
- error = results[i]
649
- logger.error(f"爬虫 {spider_classes_to_run[i].__name__} 错误详情: {error}")
650
- else:
651
- logger.info(f"所有 {total} 个爬虫均成功完成! 🎉")
652
-
653
- # 返回统计结果
654
- return {
655
- 'total': total,
656
- 'successful': successful,
657
- 'failed': len(failed),
658
- 'success_rate': (successful / total) * 100 if total > 0 else 0,
659
- 'context_stats': self.context.get_stats()
660
- }
661
-
662
- finally:
663
- # 阶段 7: 清理和关闭
664
- await self.stop_monitoring()
665
- await self._cleanup_process()
666
-
667
- async def _cleanup_process(self):
668
- """清理进程资源"""
669
- try:
670
- # 等待所有活跃爬虫完成
671
- if self.crawlers:
672
- close_tasks = [crawler.close() for crawler in self.crawlers]
673
- await asyncio.gather(*close_tasks, return_exceptions=True)
674
- self.crawlers.clear()
675
-
676
- # 清理活跃任务
677
- if self._active_tasks:
678
- for task in list(self._active_tasks):
679
- if not task.done():
680
- task.cancel()
681
- await asyncio.gather(*self._active_tasks, return_exceptions=True)
682
- self._active_tasks.clear()
683
-
684
- logger.debug("进程资源清理完成")
685
-
686
- except Exception as e:
687
- logger.error(f"清理进程资源时发生错误: {e}", exc_info=True)
688
-
689
- def get_process_stats(self) -> Dict[str, Any]:
690
- """获取进程统计信息"""
691
- context_stats = self.context.get_stats()
692
-
693
- return {
694
- 'context': context_stats,
695
- 'performance': self._performance_stats.copy(),
696
- 'crawlers': {
697
- 'total_registered': len(self._spider_registry),
698
- 'active_crawlers': len(self.crawlers),
699
- 'max_concurrency': self.max_concurrency
700
- },
701
- 'registry': {
702
- 'spider_names': list(self._spider_registry.keys()),
703
- 'spider_classes': [cls.__name__ for cls in self._spider_registry.values()]
704
- }
705
- }
706
- def _resolve_spiders_to_run(
707
- self,
708
- spiders_input: Union[Type[Spider], str, List[Union[Type[Spider], str]]]
709
- ) -> List[Type[Spider]]:
710
- """
711
- 解析输入为爬虫类列表
712
-
713
- 支持各种输入格式并验证唯一性
714
- """
715
- inputs = self._normalize_inputs(spiders_input)
716
- seen_spider_names: Set[str] = set()
717
- spider_classes: List[Type[Spider]] = []
718
-
719
- for item in inputs:
720
- try:
721
- spider_cls = self._resolve_spider_class(item)
722
- spider_name = getattr(spider_cls, 'name', None)
723
-
724
- if not spider_name:
725
- raise ValueError(f"爬虫类 {spider_cls.__name__} 缺少 'name' 属性")
726
-
727
- if spider_name in seen_spider_names:
728
- raise ValueError(
729
- f"本次运行中爬虫名称 '{spider_name}' 重复。\n"
730
- f"请确保每个爬虫的 name 属性在本次运行中唯一。"
731
- )
732
-
733
- seen_spider_names.add(spider_name)
734
- spider_classes.append(spider_cls)
735
-
736
- logger.debug(f"解析爬虫成功: {item} -> {spider_cls.__name__} (name='{spider_name}')")
737
-
738
- except Exception as e:
739
- logger.error(f"解析爬虫失败: {item} - {e}")
740
- raise
741
-
742
- return spider_classes
743
-
744
- @staticmethod
745
- def _normalize_inputs(spiders_input) -> List[Union[Type[Spider], str]]:
746
- """
747
- 标准化输入为列表
748
-
749
- 支持更多输入类型并提供更好的错误信息
750
- """
751
- if isinstance(spiders_input, (type, str)):
752
- return [spiders_input]
753
- elif isinstance(spiders_input, (list, tuple, set)):
754
- spider_list = list(spiders_input)
755
- if not spider_list:
756
- raise ValueError("爬虫列表不能为空")
757
- return spider_list
758
- else:
759
- raise TypeError(
760
- f"spiders 参数类型不支持: {type(spiders_input)}\n"
761
- f"支持的类型: Spider类、name字符串,或它们的列表/元组/集合"
762
- )
763
-
764
- def _resolve_spider_class(self, item: Union[Type[Spider], str]) -> Type[Spider]:
765
- """
766
- 解析单个输入项为爬虫类
767
-
768
- 提供更好的错误提示和调试信息
769
- """
770
- if isinstance(item, type) and issubclass(item, Spider):
771
- # 直接是 Spider 类
772
- return item
773
- elif isinstance(item, str):
774
- # 是字符串名称,需要查找注册表
775
- spider_cls = self._spider_registry.get(item)
776
- if not spider_cls:
777
- available_spiders = list(self._spider_registry.keys())
778
- raise ValueError(
779
- f"未找到名为 '{item}' 的爬虫。\n"
780
- f"已注册的爬虫: {available_spiders}\n"
781
- f"请检查爬虫名称是否正确,或者确保爬虫已被正确导入和注册。"
782
- )
783
- return spider_cls
784
- else:
785
- raise TypeError(
786
- f"无效类型 {type(item)}: {item}\n"
787
- f"必须是 Spider 类或字符串 name。\n"
788
- f"示例: MySpider 或 'my_spider'"
789
- )
790
-
791
- async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
792
- """
793
- 受信号量限制的爬虫运行函数
794
-
795
- 包含增强的错误处理和监控功能
796
- """
797
- task = asyncio.current_task()
798
- crawler = None
799
-
800
- try:
801
- # 注册任务
802
- if task:
803
- self._active_tasks.add(task)
804
-
805
- # 获取并发许可
806
- await self.semaphore.acquire()
807
-
808
- start_msg = f"[{seq}/{total}] 启动爬虫: {spider_cls.__name__}"
809
- logger.info(start_msg)
810
-
811
- # 创建并运行爬虫
812
- crawler = Crawler(spider_cls, self.settings, self.context)
813
- self.crawlers.add(crawler)
814
-
815
- # 记录启动时间
816
- start_time = time.time()
817
-
818
- # 运行爬虫
819
- await crawler.crawl()
820
-
821
- # 计算运行时间
822
- duration = time.time() - start_time
823
-
824
- end_msg = (
825
- f"[{seq}/{total}] 爬虫完成: {spider_cls.__name__}, "
826
- f"耗时: {duration:.2f}秒"
827
- )
828
- logger.info(end_msg)
829
-
830
- # 记录成功统计
831
- self._performance_stats['successful_requests'] += 1
832
-
833
- except Exception as e:
834
- # 记录失败统计
835
- self._performance_stats['failed_requests'] += 1
836
-
837
- error_msg = f"爬虫 {spider_cls.__name__} 执行失败: {e}"
838
- logger.error(error_msg, exc_info=True)
839
-
840
- # 将错误信息记录到上下文
841
- if hasattr(self, 'context'):
842
- self.context.increment_failed(error_msg)
843
-
844
- raise
845
- finally:
846
- # 清理资源
847
- try:
848
- if crawler and crawler in self.crawlers:
849
- self.crawlers.remove(crawler)
850
-
851
- if task and task in self._active_tasks:
852
- self._active_tasks.remove(task)
853
-
854
- self.semaphore.release()
855
-
856
- except Exception as cleanup_error:
857
- logger.warning(f"清理资源时发生错误: {cleanup_error}")
858
-
859
- def _shutdown(self, _signum, _frame):
860
- """
861
- 优雅关闭信号处理
862
-
863
- 提供更好的关闭体验和资源清理
864
- """
865
- signal_name = {signal.SIGINT: 'SIGINT', signal.SIGTERM: 'SIGTERM'}.get(_signum, str(_signum))
866
- logger.warning(f"收到关闭信号 {signal_name},正在停止所有爬虫...")
867
-
868
- # 设置关闭事件
869
- if hasattr(self, '_shutdown_event'):
870
- self._shutdown_event.set()
871
-
872
- # 停止所有爬虫引擎
873
- for crawler in list(self.crawlers):
874
- if crawler.engine:
875
- crawler.engine.running = False
876
- crawler.engine.normal = False
877
- logger.debug(f"已停止爬虫引擎: {getattr(crawler.spider, 'name', 'Unknown')}")
878
-
879
- # 创建关闭任务
880
- asyncio.create_task(self._wait_for_shutdown())
881
-
882
- logger.info("关闭指令已发送,等待爬虫完成当前任务...")
883
-
884
- async def _wait_for_shutdown(self):
885
- """
886
- 等待所有活跃任务完成
887
-
888
- 提供更好的关闭时间控制和进度反馈
889
- """
890
- try:
891
- # 停止监控任务
892
- await self.stop_monitoring()
893
-
894
- # 等待活跃任务完成
895
- pending = [t for t in self._active_tasks if not t.done()]
896
-
897
- if pending:
898
- logger.info(
899
- f"等待 {len(pending)} 个活跃任务完成..."
900
- f"(最大等待时间: 30秒)"
901
- )
902
-
903
- # 设置超时时间
904
- try:
905
- await asyncio.wait_for(
906
- asyncio.gather(*pending, return_exceptions=True),
907
- timeout=30.0
908
- )
909
- except asyncio.TimeoutError:
910
- logger.warning("部分任务超时,强制取消中...")
911
-
912
- # 强制取消超时任务
913
- for task in pending:
914
- if not task.done():
915
- task.cancel()
916
-
917
- # 等待取消完成
918
- await asyncio.gather(*pending, return_exceptions=True)
919
-
920
- # 最终清理
921
- await self._cleanup_process()
922
-
923
- # 输出最终统计
924
- final_stats = self.context.get_stats()
925
- logger.info(
926
- f"所有爬虫已优雅关闭 👋\n"
927
- f" - 总计爬虫: {final_stats['total_crawlers']}\n"
928
- f" - 成功完成: {final_stats['completed_crawlers']}\n"
929
- f" - 失败数量: {final_stats['failed_crawlers']}\n"
930
- f" - 成功率: {final_stats['success_rate']:.1f}%\n"
931
- f" - 总运行时间: {final_stats['duration_seconds']}秒"
932
- )
933
-
934
- except Exception as e:
935
- logger.error(f"关闭过程中发生错误: {e}", exc_info=True)
936
-
937
- @classmethod
938
- def _get_default_settings(cls) -> SettingManager:
939
- """
940
- 加载默认配置
941
-
942
- 提供更好的错误处理和降级策略
943
- """
944
- try:
945
- settings = get_settings()
946
- logger.debug("成功加载默认配置")
947
- return settings
948
- except Exception as e:
949
- logger.warning(f"无法加载默认配置: {e},使用空配置")
950
- return SettingManager()
951
-
952
- def _log_startup_info(self):
953
- """打印启动信息,包括运行模式和关键配置检查"""
954
- # 获取运行模式
955
- run_mode = self.settings.get('RUN_MODE', 'standalone')
956
-
957
- # 构建启动信息日志
958
- startup_info = [
959
- "🚀 Crawlo 爬虫框架启动",
960
- f" 运行模式: {run_mode}"
961
- ]
962
-
963
- # 根据运行模式添加特定信息
964
- if run_mode == 'distributed':
965
- startup_info.append(" 🌐 分布式模式 - 支持多节点协同工作")
966
- # 检查Redis配置
967
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
968
- redis_port = self.settings.get('REDIS_PORT', 6379)
969
- startup_info.append(f" Redis地址: {redis_host}:{redis_port}")
970
-
971
- # 检查队列类型
972
- queue_type = self.settings.get('QUEUE_TYPE', 'redis')
973
- if queue_type != 'redis':
974
- startup_info.append(f" ⚠️ 警告: 分布式模式下建议使用 'redis' 队列类型,当前为 '{queue_type}'")
975
- else:
976
- startup_info.append(" 🏠 单机模式 - 适用于开发和小规模数据采集")
977
- # 检查队列类型
978
- queue_type = self.settings.get('QUEUE_TYPE', 'memory')
979
- if queue_type != 'memory' and queue_type != 'auto':
980
- startup_info.append(f" ⚠️ 警告: 单机模式下建议使用 'memory' 队列类型,当前为 '{queue_type}'")
981
-
982
- # 检查关键配置项
983
- concurrency = self.settings.get('CONCURRENCY', 8)
984
- download_delay = self.settings.get('DOWNLOAD_DELAY', 1.0)
985
- filter_class = self.settings.get('FILTER_CLASS', 'crawlo.filters.memory_filter.MemoryFilter')
986
-
987
- # 并发数检查
988
- if run_mode == 'distributed':
989
- if concurrency < 8:
990
- startup_info.append(f" ⚠️ 警告: 分布式模式下建议并发数 >= 8,当前为 {concurrency}")
991
- else:
992
- if concurrency > 16:
993
- startup_info.append(f" ⚠️ 警告: 单机模式下建议并发数 <= 16,当前为 {concurrency}")
994
-
995
- # 下载延迟检查
996
- if download_delay < 0.1:
997
- startup_info.append(f" ⚠️ 警告: 下载延迟过小({download_delay}s)可能导致被封IP")
998
- elif download_delay > 10:
999
- startup_info.append(f" ⚠️ 警告: 下载延迟过大({download_delay}s)可能影响效率")
1000
-
1001
- startup_info.extend([
1002
- f" 并发数: {concurrency}",
1003
- f" 下载延迟: {download_delay}秒",
1004
- f" 过滤器类: {filter_class}"
1005
- ])
1006
-
1007
- # 检查去重管道配置
1008
- default_dedup_pipeline = self.settings.get('DEFAULT_DEDUP_PIPELINE', '')
1009
- pipelines = self.settings.get('PIPELINES', [])
1010
-
1011
- if default_dedup_pipeline:
1012
- startup_info.append(f" 默认去重管道: {default_dedup_pipeline}")
1013
-
1014
- # 检查去重管道是否在PIPELINES列表中
1015
- if default_dedup_pipeline and default_dedup_pipeline not in pipelines:
1016
- startup_info.append(f" ⚠️ 警告: 默认去重管道 '{default_dedup_pipeline}' 未添加到 PIPELINES 列表中")
1017
-
1018
- # 检查下载器配置
1019
- downloader = self.settings.get('DOWNLOADER', 'crawlo.downloader.aiohttp_downloader.AioHttpDownloader')
1020
- # startup_info.append(f" 下载器: {downloader}")
1021
-
1022
- # 检查中间件配置
1023
- middlewares = self.settings.get('MIDDLEWARES', [])
1024
- # startup_info.append(f" 中间件数量: {len(middlewares)}")
1025
-
1026
- # 检查扩展组件配置
1027
- extensions = self.settings.get('EXTENSIONS', [])
1028
- # startup_info.append(f" 扩展组件数量: {len(extensions)}")
1029
-
1030
- # 输出启动信息
1031
- logger.info("\n".join(startup_info))
1032
-
1033
-
1034
- # === 工具函数 ===
1035
-
1036
- def create_crawler_with_optimizations(
1037
- spider_cls: Type[Spider],
1038
- settings: Optional[SettingManager] = None,
1039
- **optimization_kwargs
1040
- ) -> Crawler:
1041
- """
1042
- 创建优化的爬虫实例
1043
-
1044
- :param spider_cls: 爬虫类
1045
- :param settings: 设置管理器
1046
- :param optimization_kwargs: 优化参数
1047
- :return: 爬虫实例
1048
- """
1049
- if settings is None:
1050
- settings = SettingManager()
1051
-
1052
- # 应用优化配置
1053
- for key, value in optimization_kwargs.items():
1054
- settings.set(key, value)
1055
-
1056
- context = CrawlerContext()
1057
- return Crawler(spider_cls, settings, context)
1058
-
1059
-
1060
- def create_process_with_large_scale_config(
1061
- config_type: str = 'balanced',
1062
- concurrency: int = 16,
1063
- **kwargs
1064
- ) -> CrawlerProcess:
1065
- """
1066
- 创建支持大规模优化的进程管理器
1067
-
1068
- :param config_type: 配置类型 ('conservative', 'balanced', 'aggressive', 'memory_optimized')
1069
- :param concurrency: 并发数
1070
- :param kwargs: 其他参数
1071
- :return: 进程管理器
1072
- """
1073
- try:
1074
- from crawlo.utils.large_scale_config import LargeScaleConfig
1075
-
1076
- # 获取优化配置
1077
- config_methods = {
1078
- 'conservative': LargeScaleConfig.conservative_config,
1079
- 'balanced': LargeScaleConfig.balanced_config,
1080
- 'aggressive': LargeScaleConfig.aggressive_config,
1081
- 'memory_optimized': LargeScaleConfig.memory_optimized_config
1082
- }
1083
-
1084
- if config_type not in config_methods:
1085
- logger.warning(f"未知的配置类型: {config_type},使用默认配置")
1086
- settings = SettingManager()
1087
- else:
1088
- config = config_methods[config_type](concurrency)
1089
- settings = SettingManager()
1090
- settings.update(config)
1091
-
1092
- return CrawlerProcess(
1093
- settings=settings,
1094
- max_concurrency=concurrency,
1095
- **kwargs
1096
- )
1097
-
1098
- except ImportError:
1099
- logger.warning("大规模配置模块不存在,使用默认配置")
1100
- return CrawlerProcess(max_concurrency=concurrency, **kwargs)
1101
-
1102
-
1103
- # === 导出接口 ===
1104
-
1105
- __all__ = [
1106
- 'Crawler',
1107
- 'CrawlerProcess',
1108
- 'CrawlerContext',
1109
- 'create_crawler_with_optimizations',
1110
- 'create_process_with_large_scale_config'
1
+ #!/usr/bin/python
2
+ # -*- coding: UTF-8 -*-
3
+ """
4
+ Crawlo Crawler Module
5
+ ====================
6
+ 提供爬虫进程管理和运行时核心功能。
7
+
8
+ 核心组件:
9
+ - Crawler: 单个爬虫运行实例,管理Spider与引擎的生命周期
10
+ - CrawlerProcess: 爬虫进程管理器,支持多爬虫并发调度和资源管理
11
+
12
+ 功能特性:
13
+ - 智能并发控制和资源管理
14
+ - 优雅关闭和信号处理
15
+ - 统计监控和性能追踪
16
+ - 自动模块发现和注册
17
+ - 错误恢复和重试机制
18
+ - 大规模爬虫优化支持
19
+
20
+ 示例用法:
21
+ # 单个爬虫运行
22
+ crawler = Crawler(MySpider, settings)
23
+ await crawler.crawl()
24
+
25
+ # 多爬虫并发管理
26
+ process = CrawlerProcess()
27
+ await process.crawl([Spider1, Spider2])
28
+ """
29
+ from __future__ import annotations
30
+ import asyncio
31
+ import signal
32
+ import time
33
+ import threading
34
+ from typing import Type, Optional, Set, List, Union, Dict, Any
35
+ from .spider import Spider, get_global_spider_registry
36
+ from .core.engine import Engine
37
+ from .utils.log import get_logger
38
+ from .subscriber import Subscriber
39
+ from .extension import ExtensionManager
40
+ from .stats_collector import StatsCollector
41
+ from .event import spider_opened, spider_closed
42
+ from .settings.setting_manager import SettingManager
43
+ from crawlo.project import merge_settings, get_settings
44
+
45
+
46
+ logger = get_logger(__name__)
47
+
48
+
49
+ class CrawlerContext:
50
+ """
51
+ 爬虫上下文管理器
52
+ 提供共享状态和资源管理
53
+ """
54
+
55
+ def __init__(self):
56
+ self.start_time = time.time()
57
+ self.total_crawlers = 0
58
+ self.active_crawlers = 0
59
+ self.completed_crawlers = 0
60
+ self.failed_crawlers = 0
61
+ self.error_log = []
62
+ self._lock = threading.RLock()
63
+
64
+ def increment_total(self):
65
+ with self._lock:
66
+ self.total_crawlers += 1
67
+
68
+ def increment_active(self):
69
+ with self._lock:
70
+ self.active_crawlers += 1
71
+
72
+ def decrement_active(self):
73
+ with self._lock:
74
+ self.active_crawlers -= 1
75
+
76
+ def increment_completed(self):
77
+ with self._lock:
78
+ self.completed_crawlers += 1
79
+
80
+ def increment_failed(self, error: str):
81
+ with self._lock:
82
+ self.failed_crawlers += 1
83
+ self.error_log.append({
84
+ 'timestamp': time.time(),
85
+ 'error': error
86
+ })
87
+
88
+ def get_stats(self) -> Dict[str, Any]:
89
+ with self._lock:
90
+ duration = time.time() - self.start_time
91
+ return {
92
+ 'total_crawlers': self.total_crawlers,
93
+ 'active_crawlers': self.active_crawlers,
94
+ 'completed_crawlers': self.completed_crawlers,
95
+ 'failed_crawlers': self.failed_crawlers,
96
+ 'success_rate': (self.completed_crawlers / max(1, self.total_crawlers)) * 100,
97
+ 'duration_seconds': round(duration, 2),
98
+ 'error_count': len(self.error_log)
99
+ }
100
+
101
+
102
+ class Crawler:
103
+ """
104
+ 单个爬虫运行实例,管理 Spider 与引擎的生命周期
105
+
106
+ 提供功能:
107
+ - Spider 生命周期管理(初始化、运行、关闭)
108
+ - 引擎组件的协调管理
109
+ - 配置合并和验证
110
+ - 统计数据收集
111
+ - 扩展管理
112
+ - 异常处理和清理
113
+ """
114
+
115
+ def __init__(self, spider_cls: Type[Spider], settings: SettingManager, context: Optional[CrawlerContext] = None):
116
+ self.spider_cls = spider_cls
117
+ self.spider: Optional[Spider] = None
118
+ self.engine: Optional[Engine] = None
119
+ self.stats: Optional[StatsCollector] = None
120
+ self.subscriber: Optional[Subscriber] = None
121
+ self.extension: Optional[ExtensionManager] = None
122
+ self.settings: SettingManager = settings.copy()
123
+ self.context = context or CrawlerContext()
124
+
125
+ # 状态管理
126
+ self._closed = False
127
+ self._close_lock = asyncio.Lock()
128
+ self._start_time = None
129
+ self._end_time = None
130
+
131
+ # 性能监控
132
+ self._performance_metrics = {
133
+ 'initialization_time': 0,
134
+ 'crawl_duration': 0,
135
+ 'memory_peak': 0,
136
+ 'request_count': 0,
137
+ 'error_count': 0
138
+ }
139
+
140
+ async def crawl(self):
141
+ """
142
+ 启动爬虫核心流程
143
+
144
+ 包含以下阶段:
145
+ 1. 初始化阶段: 创建所有组件
146
+ 2. 验证阶段: 检查配置和状态
147
+ 3. 运行阶段: 启动爬虫引擎
148
+ 4. 清理阶段: 资源释放
149
+ """
150
+ init_start = time.time()
151
+ self._start_time = init_start
152
+
153
+ try:
154
+ # 更新上下文状态
155
+ self.context.increment_active()
156
+
157
+ # 阶段 1: 初始化组件
158
+ # 调整组件初始化顺序,确保日志输出顺序符合要求
159
+ self.subscriber = self._create_subscriber()
160
+ self.spider = self._create_spider()
161
+ self.engine = self._create_engine()
162
+ self.stats = self._create_stats()
163
+ # 注意:这里不初始化扩展管理器,让它在引擎中初始化
164
+
165
+ # 记录初始化时间
166
+ self._performance_metrics['initialization_time'] = time.time() - init_start
167
+
168
+ # 阶段 2: 验证状态
169
+ self._validate_crawler_state()
170
+
171
+ # 阶段 3: 显示运行配置摘要
172
+ self._log_runtime_summary()
173
+
174
+ # 阶段 4: 启动爬虫
175
+ crawl_start = time.time()
176
+ await self.engine.start_spider(self.spider)
177
+
178
+ # 记录爬取时间
179
+ self._performance_metrics['crawl_duration'] = time.time() - crawl_start
180
+ self._end_time = time.time()
181
+
182
+ # 更新上下文状态
183
+ self.context.increment_completed()
184
+
185
+ logger.info(f"爬虫 {self.spider.name} 完成,耗时 {self._get_total_duration():.2f}秒")
186
+
187
+ except Exception as e:
188
+ self._performance_metrics['error_count'] += 1
189
+ self.context.increment_failed(str(e))
190
+ logger.error(f"爬虫 {getattr(self.spider, 'name', 'Unknown')} 运行失败: {e}", exc_info=True)
191
+ raise
192
+ finally:
193
+ self.context.decrement_active()
194
+ # 确保资源清理
195
+ await self._ensure_cleanup()
196
+
197
+ def _log_runtime_summary(self):
198
+ """记录运行时配置摘要"""
199
+ # 获取爬虫名称
200
+ spider_name = getattr(self.spider, 'name', 'Unknown')
201
+
202
+ # 显示简化的运行时信息,避免与项目初始化重复
203
+ logger.info(f"🕷️ 开始运行爬虫: {spider_name}")
204
+
205
+ # 注意:并发数和下载延迟信息已在其他地方显示,避免重复
206
+ # 如果需要显示其他运行时特定信息,可以在这里添加
207
+
208
+ def _validate_crawler_state(self):
209
+ """
210
+ 验证爬虫状态和配置
211
+ 确保所有必要组件都已正确初始化
212
+ """
213
+ if not self.spider:
214
+ raise RuntimeError("爬虫实例未初始化")
215
+ if not self.engine:
216
+ raise RuntimeError("引擎未初始化")
217
+ if not self.stats:
218
+ raise RuntimeError("统计收集器未初始化")
219
+ if not self.subscriber:
220
+ raise RuntimeError("事件订阅器未初始化")
221
+
222
+ # 检查关键配置
223
+ if not self.spider.name:
224
+ raise ValueError("爬虫名称不能为空")
225
+
226
+ logger.debug(f"爬虫 {self.spider.name} 状态验证通过")
227
+
228
+ def _get_total_duration(self) -> float:
229
+ """获取总运行时间"""
230
+ if self._start_time and self._end_time:
231
+ return self._end_time - self._start_time
232
+ return 0.0
233
+
234
+ async def _ensure_cleanup(self):
235
+ """确保资源清理"""
236
+ try:
237
+ if not self._closed:
238
+ await self.close()
239
+ except Exception as e:
240
+ logger.warning(f"清理资源时发生错误: {e}")
241
+
242
+ def get_performance_metrics(self) -> Dict[str, Any]:
243
+ """获取性能指标"""
244
+ metrics = self._performance_metrics.copy()
245
+ metrics['total_duration'] = self._get_total_duration()
246
+ if self.stats:
247
+ # 添加统计数据
248
+ stats_data = getattr(self.stats, 'get_stats', lambda: {})()
249
+ metrics.update(stats_data)
250
+ return metrics
251
+ @staticmethod
252
+ def _create_subscriber() -> Subscriber:
253
+ """创建事件订阅器"""
254
+ return Subscriber()
255
+
256
+ def _create_spider(self) -> Spider:
257
+ """
258
+ 创建并验证爬虫实例(增强版)
259
+
260
+ 执行以下验证:
261
+ - 爬虫名称必须存在
262
+ - start_requests 方法必须可调用
263
+ - start_urls 不能是字符串
264
+ - parse 方法建议存在
265
+ """
266
+ spider = self.spider_cls.create_instance(self)
267
+
268
+ # 必要属性检查
269
+ if not getattr(spider, 'name', None):
270
+ raise AttributeError(
271
+ f"爬虫类 '{self.spider_cls.__name__}' 必须定义 'name' 属性。\n"
272
+ f"示例: name = 'my_spider'"
273
+ )
274
+
275
+ if not callable(getattr(spider, 'start_requests', None)):
276
+ raise AttributeError(
277
+ f"爬虫 '{spider.name}' 必须实现可调用的 'start_requests' 方法。\n"
278
+ f"示例: def start_requests(self): yield Request(url='...')"
279
+ )
280
+
281
+ # start_urls 类型检查
282
+ start_urls = getattr(spider, 'start_urls', [])
283
+ if isinstance(start_urls, str):
284
+ raise TypeError(
285
+ f"爬虫 '{spider.name}' 的 'start_urls' 必须是列表或元组,不能是字符串。\n"
286
+ f"正确写法: start_urls = ['http://example.com']\n"
287
+ f"错误写法: start_urls = 'http://example.com'"
288
+ )
289
+
290
+ # parse 方法检查(警告而非错误)
291
+ if not callable(getattr(spider, 'parse', None)):
292
+ logger.warning(
293
+ f"爬虫 '{spider.name}' 未定义 'parse' 方法。\n"
294
+ f"请确保所有 Request 都指定了回调函数,否则响应将被忽略。"
295
+ )
296
+
297
+ # 设置爬虫配置
298
+ self._set_spider(spider)
299
+
300
+ logger.debug(f"爬虫 '{spider.name}' 初始化完成")
301
+ return spider
302
+
303
+ def _create_engine(self) -> Engine:
304
+ """创建并初始化引擎"""
305
+ engine = Engine(self)
306
+ engine.engine_start()
307
+ logger.debug(f"引擎初始化完成,爬虫: {getattr(self.spider, 'name', 'Unknown')}")
308
+ return engine
309
+
310
+ def _create_stats(self) -> StatsCollector:
311
+ """创建统计收集器"""
312
+ stats = StatsCollector(self)
313
+ logger.debug(f"统计收集器初始化完成,爬虫: {getattr(self.spider, 'name', 'Unknown')}")
314
+ return stats
315
+
316
+ def _create_extension(self) -> ExtensionManager:
317
+ """创建扩展管理器"""
318
+ # 修改扩展管理器的创建方式,延迟初始化直到需要时
319
+ extension = ExtensionManager.create_instance(self)
320
+ logger.debug(f"扩展管理器初始化完成,爬虫: {getattr(self.spider, 'name', 'Unknown')}")
321
+ return extension
322
+
323
+ def _set_spider(self, spider: Spider):
324
+ """
325
+ 设置爬虫配置和事件订阅
326
+ 将爬虫的生命周期事件与订阅器绑定
327
+ """
328
+ # 订阅爬虫生命周期事件
329
+ self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
330
+ self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
331
+
332
+ # 合并爬虫自定义配置
333
+ merge_settings(spider, self.settings)
334
+
335
+ logger.debug(f"爬虫 '{spider.name}' 配置合并完成")
336
+
337
+ async def close(self, reason='finished') -> None:
338
+ """
339
+ 关闭爬虫并清理资源(增强版)
340
+
341
+ 确保只关闭一次,并处理所有清理操作
342
+ """
343
+ async with self._close_lock:
344
+ if self._closed:
345
+ return
346
+
347
+ self._closed = True
348
+ self._end_time = time.time()
349
+
350
+ try:
351
+ # 通知爬虫关闭事件
352
+ if self.subscriber:
353
+ await self.subscriber.notify(spider_closed)
354
+
355
+ # 统计数据收集
356
+ if self.stats and self.spider:
357
+ self.stats.close_spider(spider=self.spider, reason=reason)
358
+ # 记录统计数据
359
+ try:
360
+ from crawlo.commands.stats import record_stats
361
+ record_stats(self)
362
+ except ImportError:
363
+ logger.debug("统计记录模块不存在,跳过统计记录")
364
+
365
+ logger.info(
366
+ f"爬虫 '{getattr(self.spider, 'name', 'Unknown')}' 已关闭,"
367
+ f"原因: {reason},耗时: {self._get_total_duration():.2f}秒"
368
+ )
369
+
370
+ except Exception as e:
371
+ logger.error(f"关闭爬虫时发生错误: {e}", exc_info=True)
372
+ finally:
373
+ # 确保资源清理
374
+ await self._cleanup_resources()
375
+
376
+ async def _cleanup_resources(self):
377
+ """清理所有资源"""
378
+ cleanup_tasks = []
379
+
380
+ # 引擎清理
381
+ if self.engine:
382
+ try:
383
+ cleanup_tasks.append(self.engine.close())
384
+ except AttributeError:
385
+ pass # 引擎没有close方法
386
+
387
+ # 扩展清理
388
+ if self.extension:
389
+ try:
390
+ cleanup_tasks.append(self.extension.close())
391
+ except AttributeError:
392
+ pass
393
+
394
+ # 统计收集器清理
395
+ if self.stats:
396
+ try:
397
+ cleanup_tasks.append(self.stats.close())
398
+ except AttributeError:
399
+ pass
400
+
401
+ # 并发执行清理任务
402
+ if cleanup_tasks:
403
+ await asyncio.gather(*cleanup_tasks, return_exceptions=True)
404
+
405
+ logger.debug("资源清理完成")
406
+
407
+
408
+ class CrawlerProcess:
409
+ """
410
+ 爬虫进程管理器
411
+
412
+ 支持功能:
413
+ - 多爬虫并发调度和资源管理
414
+ - 自动模块发现和爬虫注册
415
+ - 智能并发控制和负载均衡
416
+ - 优雅关闭和信号处理
417
+ - 实时状态监控和统计
418
+ - 错误恢复和重试机制
419
+ - 大规模爬虫优化支持
420
+
421
+ 使用示例:
422
+ # 基本用法
423
+ process = CrawlerProcess()
424
+ await process.crawl(MySpider)
425
+
426
+ # 多爬虫并发
427
+ await process.crawl([Spider1, Spider2, 'spider_name'])
428
+
429
+ # 自定义并发数
430
+ process = CrawlerProcess(max_concurrency=8)
431
+ """
432
+
433
+ def __init__(
434
+ self,
435
+ settings: Optional[SettingManager] = None,
436
+ max_concurrency: Optional[int] = None,
437
+ spider_modules: Optional[List[str]] = None,
438
+ enable_monitoring: bool = True
439
+ ):
440
+ # 基础配置
441
+ self.settings: SettingManager = settings or self._get_default_settings()
442
+ self.crawlers: Set[Crawler] = set()
443
+ self._active_tasks: Set[asyncio.Task] = set()
444
+
445
+ # 上下文管理器
446
+ self.context = CrawlerContext()
447
+
448
+ # 并发控制配置
449
+ self.max_concurrency: int = (
450
+ max_concurrency
451
+ or self.settings.get('MAX_RUNNING_SPIDERS')
452
+ or self.settings.get('CONCURRENCY', 3)
453
+ )
454
+ self.semaphore = asyncio.Semaphore(self.max_concurrency)
455
+
456
+ # 监控配置
457
+ self.enable_monitoring = enable_monitoring
458
+ self._monitoring_task = None
459
+ self._shutdown_event = asyncio.Event()
460
+
461
+ # 自动发现并导入爬虫模块
462
+ if spider_modules:
463
+ self.auto_discover(spider_modules)
464
+
465
+ # 使用全局注册表的快照(避免后续导入影响)
466
+ self._spider_registry: Dict[str, Type[Spider]] = get_global_spider_registry()
467
+
468
+ # 性能监控
469
+ self._performance_stats = {
470
+ 'total_requests': 0,
471
+ 'successful_requests': 0,
472
+ 'failed_requests': 0,
473
+ 'memory_usage_mb': 0,
474
+ 'cpu_usage_percent': 0
475
+ }
476
+
477
+ # 注册信号量
478
+ signal.signal(signal.SIGINT, self._shutdown)
479
+ signal.signal(signal.SIGTERM, self._shutdown)
480
+
481
+ self._log_startup_info()
482
+
483
+ logger.debug(
484
+ f"CrawlerProcess 初始化完成\n"
485
+ f" - 最大并行爬虫数: {self.max_concurrency}\n"
486
+ f" - 已注册爬虫数: {len(self._spider_registry)}\n"
487
+ f" - 监控启用: {self.enable_monitoring}"
488
+ )
489
+
490
+ async def start_monitoring(self):
491
+ """启动监控任务"""
492
+ if not self.enable_monitoring:
493
+ return
494
+
495
+ self._monitoring_task = asyncio.create_task(self._monitor_loop())
496
+ logger.debug("监控任务已启动")
497
+
498
+ async def stop_monitoring(self):
499
+ """停止监控任务"""
500
+ if self._monitoring_task and not self._monitoring_task.done():
501
+ self._monitoring_task.cancel()
502
+ try:
503
+ await self._monitoring_task
504
+ except asyncio.CancelledError:
505
+ pass
506
+ logger.debug("监控任务已停止")
507
+
508
+ async def _monitor_loop(self):
509
+ """监控循环,定期收集和报告状态"""
510
+ try:
511
+ while not self._shutdown_event.is_set():
512
+ await self._collect_performance_stats()
513
+
514
+ # 每30秒输出一次状态
515
+ stats = self.context.get_stats()
516
+ if stats['active_crawlers'] > 0:
517
+ logger.debug(
518
+ f"爬虫状态: 活跃 {stats['active_crawlers']}, "
519
+ f"完成 {stats['completed_crawlers']}, "
520
+ f"失败 {stats['failed_crawlers']}, "
521
+ f"成功率 {stats['success_rate']:.1f}%"
522
+ )
523
+
524
+ await asyncio.sleep(30) # 30秒间隔
525
+
526
+ except asyncio.CancelledError:
527
+ logger.debug("监控循环被取消")
528
+ except Exception as e:
529
+ logger.error(f"监控循环错误: {e}", exc_info=True)
530
+
531
+ async def _collect_performance_stats(self):
532
+ """收集性能统计数据"""
533
+ try:
534
+ import psutil
535
+ import os
536
+
537
+ process = psutil.Process(os.getpid())
538
+ memory_info = process.memory_info()
539
+
540
+ self._performance_stats.update({
541
+ 'memory_usage_mb': round(memory_info.rss / 1024 / 1024, 2),
542
+ 'cpu_usage_percent': round(process.cpu_percent(), 2)
543
+ })
544
+
545
+ except ImportError:
546
+ # psutil 不存在时跳过性能监控
547
+ pass
548
+ except Exception as e:
549
+ logger.debug(f"收集性能统计失败: {e}")
550
+     @staticmethod
+     def auto_discover(modules: List[str]):
+         """
+         Automatically import modules to trigger Spider class definition and registration (enhanced).
+
+         Supports recursive scanning and error recovery.
+         """
+         import importlib
+         import pkgutil
+
+         discovered_count = 0
+         error_count = 0
+
+         for module_name in modules:
+             try:
+                 module = importlib.import_module(module_name)
+
+                 if hasattr(module, '__path__'):
+                     # Package module: scan recursively
+                     for _, name, _ in pkgutil.walk_packages(module.__path__, module.__name__ + "."):
+                         try:
+                             importlib.import_module(name)
+                             discovered_count += 1
+                         except Exception as sub_e:
+                             error_count += 1
+                             logger.warning(f"Failed to import submodule {name}: {sub_e}")
+                 else:
+                     # Single module
+                     importlib.import_module(module_name)
+                     discovered_count += 1
+
+                 logger.debug(f"Scanned module: {module_name}")
+
+             except Exception as e:
+                 error_count += 1
+                 logger.error(f"Failed to scan module {module_name}: {e}", exc_info=True)
+
+         logger.debug(
+             f"Spider registration finished: {discovered_count} succeeded, {error_count} failed"
+         )
+
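For illustration, discovery can also be invoked directly; the package name is a placeholder, and importing every module under it lets Spider subclasses register themselves before the registry snapshot is taken:

CrawlerProcess.auto_discover(['myproject.spiders'])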
+     # === Public read-only interface: avoid direct access to _spider_registry ===
+
+     def get_spider_names(self) -> List[str]:
+         """Return the names of all registered spiders."""
+         return list(self._spider_registry.keys())
+
+     def get_spider_class(self, name: str) -> Optional[Type[Spider]]:
+         """Look up a spider class by name."""
+         return self._spider_registry.get(name)
+
+     def is_spider_registered(self, name: str) -> bool:
+         """Check whether a given name is registered."""
+         return name in self._spider_registry
+
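Continuing the construction sketch above, the read-only accessors can be used to inspect the registry before starting a run ('my_spider' is a placeholder name):

if process.is_spider_registered('my_spider'):
    print(process.get_spider_names())
    print(process.get_spider_class('my_spider'))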
605
+ async def crawl(self, spiders: Union[Type[Spider], str, List[Union[Type[Spider], str]]]):
606
+ """
607
+ 启动一个或多个爬虫
608
+
609
+ 增强功能:
610
+ - 智能并发控制
611
+ - 实时监控和统计
612
+ - 错误恢复和重试
613
+ - 优雅关闭处理
614
+ """
615
+ # 阶段 1: 预处理和验证
616
+ spider_classes_to_run = self._resolve_spiders_to_run(spiders)
617
+ total = len(spider_classes_to_run)
618
+
619
+ if total == 0:
620
+ raise ValueError("至少需要提供一个爬虫类或名称")
621
+
622
+ # 阶段 2: 初始化上下文和监控
623
+ for _ in range(total):
624
+ self.context.increment_total()
625
+
626
+ # 启动监控任务
627
+ await self.start_monitoring()
628
+
629
+ try:
630
+ # 阶段 3: 按类名排序,保证启动顺序可预测
631
+ spider_classes_to_run.sort(key=lambda cls: cls.__name__.lower())
632
+
633
+ logger.debug(
634
+ f"开始启动 {total} 个爬虫\n"
635
+ f" - 最大并发数: {self.max_concurrency}\n"
636
+ f" - 爬虫列表: {[cls.__name__ for cls in spider_classes_to_run]}"
637
+ )
638
+
639
+ # 阶段 4: 流式启动所有爬虫任务
640
+ tasks = [
641
+ asyncio.create_task(
642
+ self._run_spider_with_limit(spider_cls, index + 1, total),
643
+ name=f"spider-{spider_cls.__name__}-{index+1}"
644
+ )
645
+ for index, spider_cls in enumerate(spider_classes_to_run)
646
+ ]
647
+
648
+ # 阶段 5: 等待所有任务完成(失败不中断)
649
+ results = await asyncio.gather(*tasks, return_exceptions=True)
650
+
651
+ # 阶段 6: 统计异常和结果
652
+ failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
653
+ successful = total - len(failed)
654
+
655
+ if failed:
656
+ failed_spiders = [spider_classes_to_run[i].__name__ for i in failed]
657
+ logger.error(
658
+ f"爬虫执行结果: 成功 {successful}/{total},失败 {len(failed)}/{total}\n"
659
+ f" - 失败爬虫: {failed_spiders}"
660
+ )
661
+
662
+ # 记录详细错误信息
663
+ for i in failed:
664
+ error = results[i]
665
+ logger.error(f"爬虫 {spider_classes_to_run[i].__name__} 错误详情: {error}")
666
+ else:
667
+ logger.info(f"所有 {total} 个爬虫均成功完成! 🎉")
668
+
669
+ # 返回统计结果
670
+ return {
671
+ 'total': total,
672
+ 'successful': successful,
673
+ 'failed': len(failed),
674
+ 'success_rate': (successful / total) * 100 if total > 0 else 0,
675
+ 'context_stats': self.context.get_stats()
676
+ }
677
+
678
+ finally:
679
+ # 阶段 7: 清理和关闭
680
+ await self.stop_monitoring()
681
+ await self._cleanup_process()
682
+
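A usage sketch for crawl(), continuing the construction example above; MySpider and 'other_spider' are placeholders, and spiders may be passed as classes, registered names, or a mix of both:

import asyncio

async def main():
    result = await process.crawl([MySpider, 'other_spider'])
    print(f"{result['successful']}/{result['total']} spiders finished, "
          f"success rate {result['success_rate']:.1f}%")

asyncio.run(main())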
683
+ async def _cleanup_process(self):
684
+ """清理进程资源"""
685
+ try:
686
+ # 等待所有活跃爬虫完成
687
+ if self.crawlers:
688
+ close_tasks = [crawler.close() for crawler in self.crawlers]
689
+ await asyncio.gather(*close_tasks, return_exceptions=True)
690
+ self.crawlers.clear()
691
+
692
+ # 清理活跃任务
693
+ if self._active_tasks:
694
+ for task in list(self._active_tasks):
695
+ if not task.done():
696
+ task.cancel()
697
+ await asyncio.gather(*self._active_tasks, return_exceptions=True)
698
+ self._active_tasks.clear()
699
+
700
+ logger.debug("进程资源清理完成")
701
+
702
+ except Exception as e:
703
+ logger.error(f"清理进程资源时发生错误: {e}", exc_info=True)
704
+
705
+ def get_process_stats(self) -> Dict[str, Any]:
706
+ """获取进程统计信息"""
707
+ context_stats = self.context.get_stats()
708
+
709
+ return {
710
+ 'context': context_stats,
711
+ 'performance': self._performance_stats.copy(),
712
+ 'crawlers': {
713
+ 'total_registered': len(self._spider_registry),
714
+ 'active_crawlers': len(self.crawlers),
715
+ 'max_concurrency': self.max_concurrency
716
+ },
717
+ 'registry': {
718
+ 'spider_names': list(self._spider_registry.keys()),
719
+ 'spider_classes': [cls.__name__ for cls in self._spider_registry.values()]
720
+ }
721
+ }
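For illustration, the returned dictionary can be inspected while a run is in progress or after it finishes (keys as defined above):

stats = process.get_process_stats()
print(stats['performance']['memory_usage_mb'], 'MB in use,',
      stats['crawlers']['active_crawlers'], 'active crawlers')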
722
+ def _resolve_spiders_to_run(
723
+ self,
724
+ spiders_input: Union[Type[Spider], str, List[Union[Type[Spider], str]]]
725
+ ) -> List[Type[Spider]]:
726
+ """
727
+ 解析输入为爬虫类列表
728
+
729
+ 支持各种输入格式并验证唯一性
730
+ """
731
+ inputs = self._normalize_inputs(spiders_input)
732
+ seen_spider_names: Set[str] = set()
733
+ spider_classes: List[Type[Spider]] = []
734
+
735
+ for item in inputs:
736
+ try:
737
+ spider_cls = self._resolve_spider_class(item)
738
+ spider_name = getattr(spider_cls, 'name', None)
739
+
740
+ if not spider_name:
741
+ raise ValueError(f"爬虫类 {spider_cls.__name__} 缺少 'name' 属性")
742
+
743
+ if spider_name in seen_spider_names:
744
+ raise ValueError(
745
+ f"本次运行中爬虫名称 '{spider_name}' 重复。\n"
746
+ f"请确保每个爬虫的 name 属性在本次运行中唯一。"
747
+ )
748
+
749
+ seen_spider_names.add(spider_name)
750
+ spider_classes.append(spider_cls)
751
+
752
+ logger.debug(f"解析爬虫成功: {item} -> {spider_cls.__name__} (name='{spider_name}')")
753
+
754
+ except Exception as e:
755
+ logger.error(f"解析爬虫失败: {item} - {e}")
756
+ raise
757
+
758
+ return spider_classes
759
+
760
+ @staticmethod
761
+ def _normalize_inputs(spiders_input) -> List[Union[Type[Spider], str]]:
762
+ """
763
+ 标准化输入为列表
764
+
765
+ 支持更多输入类型并提供更好的错误信息
766
+ """
767
+ if isinstance(spiders_input, (type, str)):
768
+ return [spiders_input]
769
+ elif isinstance(spiders_input, (list, tuple, set)):
770
+ spider_list = list(spiders_input)
771
+ if not spider_list:
772
+ raise ValueError("爬虫列表不能为空")
773
+ return spider_list
774
+ else:
775
+ raise TypeError(
776
+ f"spiders 参数类型不支持: {type(spiders_input)}\n"
777
+ f"支持的类型: Spider类、name字符串,或它们的列表/元组/集合"
778
+ )
779
+
780
+ def _resolve_spider_class(self, item: Union[Type[Spider], str]) -> Type[Spider]:
781
+ """
782
+ 解析单个输入项为爬虫类
783
+
784
+ 提供更好的错误提示和调试信息
785
+ """
786
+ if isinstance(item, type) and issubclass(item, Spider):
787
+ # 直接是 Spider
788
+ return item
789
+ elif isinstance(item, str):
790
+ # 是字符串名称,需要查找注册表
791
+ spider_cls = self._spider_registry.get(item)
792
+ if not spider_cls:
793
+ available_spiders = list(self._spider_registry.keys())
794
+ raise ValueError(
795
+ f"未找到名为 '{item}' 的爬虫。\n"
796
+ f"已注册的爬虫: {available_spiders}\n"
797
+ f"请检查爬虫名称是否正确,或者确保爬虫已被正确导入和注册。"
798
+ )
799
+ return spider_cls
800
+ else:
801
+ raise TypeError(
802
+ f"无效类型 {type(item)}: {item}\n"
803
+ f"必须是 Spider 类或字符串 name。\n"
804
+ f"示例: MySpider 或 'my_spider'"
805
+ )
806
+
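Resolution depends on each spider class exposing a unique name attribute. A minimal sketch of a spider that would pass the checks above; the import path and the parse signature are assumptions for illustration only:

from crawlo import Spider  # assumed import path for the Spider base class

class MySpider(Spider):
    name = 'my_spider'  # must be unique within a single crawl() run

    async def parse(self, response):
        yield {'url': response.url}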
807
+ async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
808
+ """
809
+ 受信号量限制的爬虫运行函数
810
+
811
+ 包含增强的错误处理和监控功能
812
+ """
813
+ task = asyncio.current_task()
814
+ crawler = None
815
+
816
+ try:
817
+ # 注册任务
818
+ if task:
819
+ self._active_tasks.add(task)
820
+
821
+ # 获取并发许可
822
+ await self.semaphore.acquire()
823
+
824
+ start_msg = f"[{seq}/{total}] 启动爬虫: {spider_cls.__name__}"
825
+ logger.info(start_msg)
826
+
827
+ # 创建并运行爬虫
828
+ crawler = Crawler(spider_cls, self.settings, self.context)
829
+ self.crawlers.add(crawler)
830
+
831
+ # 记录启动时间
832
+ start_time = time.time()
833
+
834
+ # 运行爬虫
835
+ await crawler.crawl()
836
+
837
+ # 计算运行时间
838
+ duration = time.time() - start_time
839
+
840
+ end_msg = (
841
+ f"[{seq}/{total}] 爬虫完成: {spider_cls.__name__}, "
842
+ f"耗时: {duration:.2f}秒"
843
+ )
844
+ logger.info(end_msg)
845
+
846
+ # 记录成功统计
847
+ self._performance_stats['successful_requests'] += 1
848
+
849
+ except Exception as e:
850
+ # 记录失败统计
851
+ self._performance_stats['failed_requests'] += 1
852
+
853
+ error_msg = f"爬虫 {spider_cls.__name__} 执行失败: {e}"
854
+ logger.error(error_msg, exc_info=True)
855
+
856
+ # 将错误信息记录到上下文
857
+ if hasattr(self, 'context'):
858
+ self.context.increment_failed(error_msg)
859
+
860
+ raise
861
+ finally:
862
+ # 清理资源
863
+ try:
864
+ if crawler and crawler in self.crawlers:
865
+ self.crawlers.remove(crawler)
866
+
867
+ if task and task in self._active_tasks:
868
+ self._active_tasks.remove(task)
869
+
870
+ self.semaphore.release()
871
+
872
+ except Exception as cleanup_error:
873
+ logger.warning(f"清理资源时发生错误: {cleanup_error}")
874
+
875
+ def _shutdown(self, _signum, _frame):
876
+ """
877
+ 优雅关闭信号处理
878
+
879
+ 提供更好的关闭体验和资源清理
880
+ """
881
+ signal_name = {signal.SIGINT: 'SIGINT', signal.SIGTERM: 'SIGTERM'}.get(_signum, str(_signum))
882
+ logger.warning(f"收到关闭信号 {signal_name},正在停止所有爬虫...")
883
+
884
+ # 设置关闭事件
885
+ if hasattr(self, '_shutdown_event'):
886
+ self._shutdown_event.set()
887
+
888
+ # 停止所有爬虫引擎
889
+ for crawler in list(self.crawlers):
890
+ if crawler.engine:
891
+ crawler.engine.running = False
892
+ crawler.engine.normal = False
893
+ logger.debug(f"已停止爬虫引擎: {getattr(crawler.spider, 'name', 'Unknown')}")
894
+
895
+ # 创建关闭任务
896
+ asyncio.create_task(self._wait_for_shutdown())
897
+
898
+ logger.info("关闭指令已发送,等待爬虫完成当前任务...")
899
+
900
+ async def _wait_for_shutdown(self):
901
+ """
902
+ 等待所有活跃任务完成
903
+
904
+ 提供更好的关闭时间控制和进度反馈
905
+ """
906
+ try:
907
+ # 停止监控任务
908
+ await self.stop_monitoring()
909
+
910
+ # 等待活跃任务完成
911
+ pending = [t for t in self._active_tasks if not t.done()]
912
+
913
+ if pending:
914
+ logger.info(
915
+ f"等待 {len(pending)} 个活跃任务完成..."
916
+ f"(最大等待时间: 30秒)"
917
+ )
918
+
919
+ # 设置超时时间
920
+ try:
921
+ await asyncio.wait_for(
922
+ asyncio.gather(*pending, return_exceptions=True),
923
+ timeout=30.0
924
+ )
925
+ except asyncio.TimeoutError:
926
+ logger.warning("部分任务超时,强制取消中...")
927
+
928
+ # 强制取消超时任务
929
+ for task in pending:
930
+ if not task.done():
931
+ task.cancel()
932
+
933
+ # 等待取消完成
934
+ await asyncio.gather(*pending, return_exceptions=True)
935
+
936
+ # 最终清理
937
+ await self._cleanup_process()
938
+
939
+ # 输出最终统计
940
+ final_stats = self.context.get_stats()
941
+ logger.info(
942
+ f"所有爬虫已优雅关闭 👋\n"
943
+ f" - 总计爬虫: {final_stats['total_crawlers']}\n"
944
+ f" - 成功完成: {final_stats['completed_crawlers']}\n"
945
+ f" - 失败数量: {final_stats['failed_crawlers']}\n"
946
+ f" - 成功率: {final_stats['success_rate']:.1f}%\n"
947
+ f" - 总运行时间: {final_stats['duration_seconds']}秒"
948
+ )
949
+
950
+ except Exception as e:
951
+ logger.error(f"关闭过程中发生错误: {e}", exc_info=True)
952
+
953
+ @classmethod
954
+ def _get_default_settings(cls) -> SettingManager:
955
+ """
956
+ 加载默认配置
957
+
958
+ 提供更好的错误处理和降级策略
959
+ """
960
+ try:
961
+ settings = get_settings()
962
+ logger.debug("成功加载默认配置")
963
+ return settings
964
+ except Exception as e:
965
+ logger.warning(f"无法加载默认配置: {e},使用空配置")
966
+ return SettingManager()
967
+
968
+ def _log_startup_info(self):
969
+ """打印启动信息,包括运行模式和关键配置检查"""
970
+ # 获取运行模式
971
+ run_mode = self.settings.get('RUN_MODE', 'standalone')
972
+
973
+ # 构建启动信息日志
974
+ startup_info = [
975
+ "🚀 Crawlo 爬虫框架启动"
976
+ ]
977
+
978
+ # 获取实际的队列类型
979
+ queue_type = self.settings.get('QUEUE_TYPE', 'memory')
980
+
981
+ # 根据运行模式和队列类型组合显示信息
982
+ if run_mode == 'distributed':
983
+ startup_info.append(" 运行模式: distributed")
984
+ startup_info.append(" 🌐 分布式模式 - 支持多节点协同工作")
985
+ # 显示Redis配置
986
+ redis_host = self.settings.get('REDIS_HOST', 'localhost')
987
+ redis_port = self.settings.get('REDIS_PORT', 6379)
988
+ startup_info.append(f" Redis地址: {redis_host}:{redis_port}")
989
+ elif run_mode == 'standalone':
990
+ if queue_type == 'redis':
991
+ startup_info.append(" 运行模式: standalone+redis")
992
+ # startup_info.append(" 🌐 分布式模式 - 支持多节点协同工作")
993
+ # 显示Redis配置
994
+ redis_host = self.settings.get('REDIS_HOST', 'localhost')
995
+ redis_port = self.settings.get('REDIS_PORT', 6379)
996
+ startup_info.append(f" Redis地址: {redis_host}:{redis_port}")
997
+ elif queue_type == 'auto':
998
+ startup_info.append(" 运行模式: standalone+auto")
999
+ # startup_info.append(" 🤖 自动检测模式 - 智能选择最佳运行方式")
1000
+ else: # memory
1001
+ startup_info.append(" 运行模式: standalone")
1002
+ # startup_info.append(" 🏠 单机模式 - 适用于开发和小规模数据采集")
1003
+ else: # auto mode
1004
+ if queue_type == 'redis':
1005
+ startup_info.append(" 运行模式: auto+redis")
1006
+ # startup_info.append(" 🌐 分布式模式 - 支持多节点协同工作")
1007
+ # 显示Redis配置
1008
+ redis_host = self.settings.get('REDIS_HOST', 'localhost')
1009
+ redis_port = self.settings.get('REDIS_PORT', 6379)
1010
+ startup_info.append(f" Redis地址: {redis_host}:{redis_port}")
1011
+ elif queue_type == 'memory':
1012
+ startup_info.append(" 运行模式: auto+memory")
1013
+ # startup_info.append(" 🏠 单机模式 - 适用于开发和小规模数据采集")
1014
+ else: # auto
1015
+ startup_info.append(" 运行模式: auto")
1016
+ # startup_info.append(" 🤖 自动检测模式 - 智能选择最佳运行方式")
1017
+
1018
+ # 打印启动信息
1019
+ for info in startup_info:
1020
+ logger.info(info)
1021
+
1022
+
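For reference, a sketch of the settings that drive the banner above; SettingManager.set() is used the same way as in create_crawler_with_optimizations() below, and the values are placeholders:

settings = SettingManager()
settings.set('RUN_MODE', 'standalone')   # 'standalone', 'distributed', or 'auto'
settings.set('QUEUE_TYPE', 'redis')      # 'memory', 'redis', or 'auto'
settings.set('REDIS_HOST', '127.0.0.1')
settings.set('REDIS_PORT', 6379)
# With these values the banner would report "run mode: standalone+redis" plus the Redis address.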
1023
+ # === 工具函数 ===
1024
+
1025
+ def create_crawler_with_optimizations(
1026
+ spider_cls: Type[Spider],
1027
+ settings: Optional[SettingManager] = None,
1028
+ **optimization_kwargs
1029
+ ) -> Crawler:
1030
+ """
1031
+ 创建优化的爬虫实例
1032
+
1033
+ :param spider_cls: 爬虫类
1034
+ :param settings: 设置管理器
1035
+ :param optimization_kwargs: 优化参数
1036
+ :return: 爬虫实例
1037
+ """
1038
+ if settings is None:
1039
+ settings = SettingManager()
1040
+
1041
+ # 应用优化配置
1042
+ for key, value in optimization_kwargs.items():
1043
+ settings.set(key, value)
1044
+
1045
+ context = CrawlerContext()
1046
+ return Crawler(spider_cls, settings, context)
1047
+
1048
+
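A usage sketch for the helper above; MySpider is a placeholder, and the keyword arguments are simply written into the settings manager, so any setting key can be passed (DOWNLOAD_DELAY is a hypothetical example):

import asyncio

crawler = create_crawler_with_optimizations(
    MySpider,
    CONCURRENCY=16,       # setting key referenced elsewhere in this file
    DOWNLOAD_DELAY=0.5,   # hypothetical key, shown only to illustrate **optimization_kwargs
)
asyncio.run(crawler.crawl())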
1049
+ def create_process_with_large_scale_config(
1050
+ config_type: str = 'balanced',
1051
+ concurrency: int = 16,
1052
+ **kwargs
1053
+ ) -> CrawlerProcess:
1054
+ """
1055
+ 创建支持大规模优化的进程管理器
1056
+
1057
+ :param config_type: 配置类型 ('conservative', 'balanced', 'aggressive', 'memory_optimized')
1058
+ :param concurrency: 并发数
1059
+ :param kwargs: 其他参数
1060
+ :return: 进程管理器
1061
+ """
1062
+ try:
1063
+ from crawlo.utils.large_scale_config import LargeScaleConfig
1064
+
1065
+ # 获取优化配置
1066
+ config_methods = {
1067
+ 'conservative': LargeScaleConfig.conservative_config,
1068
+ 'balanced': LargeScaleConfig.balanced_config,
1069
+ 'aggressive': LargeScaleConfig.aggressive_config,
1070
+ 'memory_optimized': LargeScaleConfig.memory_optimized_config
1071
+ }
1072
+
1073
+ if config_type not in config_methods:
1074
+ logger.warning(f"未知的配置类型: {config_type},使用默认配置")
1075
+ settings = SettingManager()
1076
+ else:
1077
+ config = config_methods[config_type](concurrency)
1078
+ settings = SettingManager()
1079
+ settings.update(config)
1080
+
1081
+ return CrawlerProcess(
1082
+ settings=settings,
1083
+ max_concurrency=concurrency,
1084
+ **kwargs
1085
+ )
1086
+
1087
+ except ImportError:
1088
+ logger.warning("大规模配置模块不存在,使用默认配置")
1089
+ return CrawlerProcess(max_concurrency=concurrency, **kwargs)
1090
+
1091
+
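A sketch combining the factory above with crawl(); 'my_spider' and the spider_modules value are placeholders, and extra keyword arguments are forwarded to CrawlerProcess as shown in the implementation:

import asyncio

process = create_process_with_large_scale_config(
    config_type='aggressive',               # one of the profiles listed in the docstring
    concurrency=32,
    spider_modules=['myproject.spiders'],   # assumed CrawlerProcess keyword, forwarded via **kwargs
)
asyncio.run(process.crawl('my_spider'))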
+ # === Exported interface ===
+
+ __all__ = [
+     'Crawler',
+     'CrawlerProcess',
+     'CrawlerContext',
+     'create_crawler_with_optimizations',
+     'create_process_with_large_scale_config'
  ]