crawlo 1.1.1-py3-none-any.whl → 1.1.2-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (68)
  1. crawlo/__init__.py +2 -1
  2. crawlo/__version__.py +1 -1
  3. crawlo/commands/genspider.py +68 -42
  4. crawlo/commands/list.py +102 -93
  5. crawlo/commands/startproject.py +89 -4
  6. crawlo/commands/utils.py +187 -0
  7. crawlo/config.py +280 -0
  8. crawlo/core/engine.py +16 -3
  9. crawlo/core/enhanced_engine.py +190 -0
  10. crawlo/core/scheduler.py +113 -8
  11. crawlo/crawler.py +840 -307
  12. crawlo/downloader/__init__.py +181 -17
  13. crawlo/downloader/aiohttp_downloader.py +15 -2
  14. crawlo/downloader/cffi_downloader.py +11 -1
  15. crawlo/downloader/httpx_downloader.py +14 -3
  16. crawlo/filters/__init__.py +122 -5
  17. crawlo/filters/aioredis_filter.py +128 -36
  18. crawlo/filters/memory_filter.py +99 -32
  19. crawlo/middleware/proxy.py +11 -8
  20. crawlo/middleware/retry.py +40 -5
  21. crawlo/mode_manager.py +201 -0
  22. crawlo/network/__init__.py +17 -3
  23. crawlo/network/request.py +118 -10
  24. crawlo/network/response.py +131 -28
  25. crawlo/pipelines/__init__.py +1 -1
  26. crawlo/pipelines/csv_pipeline.py +317 -0
  27. crawlo/pipelines/json_pipeline.py +219 -0
  28. crawlo/queue/__init__.py +0 -0
  29. crawlo/queue/pqueue.py +37 -0
  30. crawlo/queue/queue_manager.py +304 -0
  31. crawlo/queue/redis_priority_queue.py +192 -0
  32. crawlo/settings/default_settings.py +68 -9
  33. crawlo/spider/__init__.py +576 -66
  34. crawlo/task_manager.py +4 -1
  35. crawlo/templates/project/middlewares.py.tmpl +56 -45
  36. crawlo/templates/project/pipelines.py.tmpl +308 -36
  37. crawlo/templates/project/run.py.tmpl +239 -0
  38. crawlo/templates/project/settings.py.tmpl +211 -17
  39. crawlo/templates/spider/spider.py.tmpl +153 -7
  40. crawlo/utils/controlled_spider_mixin.py +336 -0
  41. crawlo/utils/large_scale_config.py +287 -0
  42. crawlo/utils/large_scale_helper.py +344 -0
  43. crawlo/utils/queue_helper.py +176 -0
  44. crawlo/utils/request_serializer.py +220 -0
  45. crawlo-1.1.2.dist-info/METADATA +567 -0
  46. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/RECORD +54 -46
  47. tests/test_final_validation.py +154 -0
  48. tests/test_redis_config.py +29 -0
  49. tests/test_redis_queue.py +225 -0
  50. tests/test_request_serialization.py +71 -0
  51. tests/test_scheduler.py +242 -0
  52. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  53. crawlo/utils/pqueue.py +0 -174
  54. crawlo-1.1.1.dist-info/METADATA +0 -220
  55. examples/baidu_spider/__init__.py +0 -7
  56. examples/baidu_spider/demo.py +0 -94
  57. examples/baidu_spider/items.py +0 -46
  58. examples/baidu_spider/middleware.py +0 -49
  59. examples/baidu_spider/pipeline.py +0 -55
  60. examples/baidu_spider/run.py +0 -27
  61. examples/baidu_spider/settings.py +0 -121
  62. examples/baidu_spider/spiders/__init__.py +0 -7
  63. examples/baidu_spider/spiders/bai_du.py +0 -61
  64. examples/baidu_spider/spiders/miit.py +0 -159
  65. examples/baidu_spider/spiders/sina.py +0 -79
  66. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/WHEEL +0 -0
  67. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/entry_points.txt +0 -0
  68. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/top_level.txt +0 -0
crawlo/crawler.py CHANGED
@@ -1,9 +1,37 @@
 #!/usr/bin/python
 # -*- coding: UTF-8 -*-
+"""
+Crawlo Crawler Module
+====================
+提供爬虫进程管理和运行时核心功能。
+
+核心组件:
+- Crawler: 单个爬虫运行实例,管理Spider与引擎的生命周期
+- CrawlerProcess: 爬虫进程管理器,支持多爬虫并发调度和资源管理
+
+功能特性:
+- 智能并发控制和资源管理
+- 优雅关闭和信号处理
+- 统计监控和性能追踪
+- 自动模块发现和注册
+- 错误恢复和重试机制
+- 大规模爬虫优化支持
+
+示例用法:
+    # 单个爬虫运行
+    crawler = Crawler(MySpider, settings)
+    await crawler.crawl()
+
+    # 多爬虫并发管理
+    process = CrawlerProcess()
+    await process.crawl([Spider1, Spider2])
+"""
 from __future__ import annotations
 import asyncio
 import signal
-from typing import Type, Optional, Set, List, Union, Dict
+import time
+import threading
+from typing import Type, Optional, Set, List, Union, Dict, Any
 from .spider import Spider, get_global_spider_registry
 from .core.engine import Engine
 from .utils.log import get_logger
@@ -18,10 +46,73 @@ from crawlo.project import merge_settings, get_settings
 logger = get_logger(__name__)


+class CrawlerContext:
+    """
+    爬虫上下文管理器
+    提供共享状态和资源管理
+    """
+
+    def __init__(self):
+        self.start_time = time.time()
+        self.total_crawlers = 0
+        self.active_crawlers = 0
+        self.completed_crawlers = 0
+        self.failed_crawlers = 0
+        self.error_log = []
+        self._lock = threading.RLock()
+
+    def increment_total(self):
+        with self._lock:
+            self.total_crawlers += 1
+
+    def increment_active(self):
+        with self._lock:
+            self.active_crawlers += 1
+
+    def decrement_active(self):
+        with self._lock:
+            self.active_crawlers -= 1
+
+    def increment_completed(self):
+        with self._lock:
+            self.completed_crawlers += 1
+
+    def increment_failed(self, error: str):
+        with self._lock:
+            self.failed_crawlers += 1
+            self.error_log.append({
+                'timestamp': time.time(),
+                'error': error
+            })
+
+    def get_stats(self) -> Dict[str, Any]:
+        with self._lock:
+            duration = time.time() - self.start_time
+            return {
+                'total_crawlers': self.total_crawlers,
+                'active_crawlers': self.active_crawlers,
+                'completed_crawlers': self.completed_crawlers,
+                'failed_crawlers': self.failed_crawlers,
+                'success_rate': (self.completed_crawlers / max(1, self.total_crawlers)) * 100,
+                'duration_seconds': round(duration, 2),
+                'error_count': len(self.error_log)
+            }
+
+
 class Crawler:
-    """单个爬虫运行实例,绑定 Spider 与引擎"""
+    """
+    单个爬虫运行实例,管理 Spider 与引擎的生命周期
+
+    提供功能:
+    - Spider 生命周期管理(初始化、运行、关闭)
+    - 引擎组件的协调管理
+    - 配置合并和验证
+    - 统计数据收集
+    - 扩展管理
+    - 异常处理和清理
+    """

-    def __init__(self, spider_cls: Type[Spider], settings: SettingManager):
+    def __init__(self, spider_cls: Type[Spider], settings: SettingManager, context: Optional[CrawlerContext] = None):
         self.spider_cls = spider_cls
         self.spider: Optional[Spider] = None
         self.engine: Optional[Engine] = None
@@ -29,124 +120,455 @@ class Crawler:
         self.subscriber: Optional[Subscriber] = None
         self.extension: Optional[ExtensionManager] = None
         self.settings: SettingManager = settings.copy()
+        self.context = context or CrawlerContext()
+
+        # 状态管理
         self._closed = False
         self._close_lock = asyncio.Lock()
+        self._start_time = None
+        self._end_time = None
+
+        # 性能监控
+        self._performance_metrics = {
+            'initialization_time': 0,
+            'crawl_duration': 0,
+            'memory_peak': 0,
+            'request_count': 0,
+            'error_count': 0
+        }

     async def crawl(self):
-        """启动爬虫核心流程"""
-        self.subscriber = self._create_subscriber()
-        self.spider = self._create_spider()
-        self.engine = self._create_engine()
-        self.stats = self._create_stats()
-        self.extension = self._create_extension()
-        await self.engine.start_spider(self.spider)
+        """
+        启动爬虫核心流程(增强版)
+
+        包含以下阶段:
+        1. 初始化阶段: 创建所有组件
+        2. 验证阶段: 检查配置和状态
+        3. 运行阶段: 启动爬虫引擎
+        4. 清理阶段: 资源释放
+        """
+        init_start = time.time()
+        self._start_time = init_start
+
+        try:
+            # 更新上下文状态
+            self.context.increment_active()
+
+            # 阶段 1: 初始化组件
+            self.subscriber = self._create_subscriber()
+            self.spider = self._create_spider()
+            self.engine = self._create_engine()
+            self.stats = self._create_stats()
+            self.extension = self._create_extension()
+
+            # 记录初始化时间
+            self._performance_metrics['initialization_time'] = time.time() - init_start
+
+            # 阶段 2: 验证状态
+            self._validate_crawler_state()
+
+            # 阶段 3: 启动爬虫
+            crawl_start = time.time()
+            await self.engine.start_spider(self.spider)
+
+            # 记录爬取时间
+            self._performance_metrics['crawl_duration'] = time.time() - crawl_start
+            self._end_time = time.time()
+
+            # 更新上下文状态
+            self.context.increment_completed()
+
+            logger.info(f"爬虫 {self.spider.name} 完成,耗时 {self._get_total_duration():.2f}秒")
+
+        except Exception as e:
+            self._performance_metrics['error_count'] += 1
+            self.context.increment_failed(str(e))
+            logger.error(f"爬虫 {getattr(self.spider, 'name', 'Unknown')} 运行失败: {e}", exc_info=True)
+            raise
+        finally:
+            self.context.decrement_active()
+            # 确保资源清理
+            await self._ensure_cleanup()

+    def _validate_crawler_state(self):
+        """
+        验证爬虫状态和配置
+        确保所有必要组件都已正确初始化
+        """
+        if not self.spider:
+            raise RuntimeError("爬虫实例未初始化")
+        if not self.engine:
+            raise RuntimeError("引擎未初始化")
+        if not self.stats:
+            raise RuntimeError("统计收集器未初始化")
+        if not self.subscriber:
+            raise RuntimeError("事件订阅器未初始化")
+
+        # 检查关键配置
+        if not self.spider.name:
+            raise ValueError("爬虫名称不能为空")
+
+        logger.debug(f"爬虫 {self.spider.name} 状态验证通过")
+
+    def _get_total_duration(self) -> float:
+        """获取总运行时间"""
+        if self._start_time and self._end_time:
+            return self._end_time - self._start_time
+        return 0.0
+
+    async def _ensure_cleanup(self):
+        """确保资源清理"""
+        try:
+            if not self._closed:
+                await self.close()
+        except Exception as e:
+            logger.warning(f"清理资源时发生错误: {e}")
+
+    def get_performance_metrics(self) -> Dict[str, Any]:
+        """获取性能指标"""
+        metrics = self._performance_metrics.copy()
+        metrics['total_duration'] = self._get_total_duration()
+        if self.stats:
+            # 添加统计数据
+            stats_data = getattr(self.stats, 'get_stats', lambda: {})()
+            metrics.update(stats_data)
+        return metrics
     @staticmethod
     def _create_subscriber() -> Subscriber:
+        """创建事件订阅器"""
         return Subscriber()

     def _create_spider(self) -> Spider:
+        """
+        创建并验证爬虫实例(增强版)
+
+        执行以下验证:
+        - 爬虫名称必须存在
+        - start_requests 方法必须可调用
+        - start_urls 不能是字符串
+        - parse 方法建议存在
+        """
         spider = self.spider_cls.create_instance(self)

+        # 必要属性检查
         if not getattr(spider, 'name', None):
-            raise AttributeError(f"爬虫类 '{self.spider_cls.__name__}' 必须定义 'name' 属性。")
+            raise AttributeError(
+                f"爬虫类 '{self.spider_cls.__name__}' 必须定义 'name' 属性。\n"
+                f"示例: name = 'my_spider'"
+            )

         if not callable(getattr(spider, 'start_requests', None)):
-            raise AttributeError(f"爬虫 '{spider.name}' 必须实现可调用的 'start_requests' 方法。")
+            raise AttributeError(
+                f"爬虫 '{spider.name}' 必须实现可调用的 'start_requests' 方法。\n"
+                f"示例: def start_requests(self): yield Request(url='...')"
+            )

+        # start_urls 类型检查
         start_urls = getattr(spider, 'start_urls', [])
         if isinstance(start_urls, str):
-            raise TypeError(f"爬虫 '{spider.name}' 的 'start_urls' 必须是列表或元组,不能是字符串。")
+            raise TypeError(
+                f"爬虫 '{spider.name}' 的 'start_urls' 必须是列表或元组,不能是字符串。\n"
+                f"正确写法: start_urls = ['http://example.com']\n"
+                f"错误写法: start_urls = 'http://example.com'"
+            )

+        # parse 方法检查(警告而非错误)
         if not callable(getattr(spider, 'parse', None)):
             logger.warning(
-                f"爬虫 '{spider.name}' 未定义 'parse' 方法。请确保所有 Request 都指定了回调函数,否则响应将被忽略。")
-
+                f"爬虫 '{spider.name}' 未定义 'parse' 方法。\n"
+                f"请确保所有 Request 都指定了回调函数,否则响应将被忽略。"
+            )
+
+        # 设置爬虫配置
         self._set_spider(spider)
+
+        logger.debug(f"爬虫 '{spider.name}' 初始化完成")
         return spider

     def _create_engine(self) -> Engine:
+        """创建并初始化引擎"""
         engine = Engine(self)
         engine.engine_start()
+        logger.debug(f"引擎初始化完成,爬虫: {getattr(self.spider, 'name', 'Unknown')}")
         return engine

     def _create_stats(self) -> StatsCollector:
-        return StatsCollector(self)
+        """创建统计收集器"""
+        stats = StatsCollector(self)
+        logger.debug(f"统计收集器初始化完成,爬虫: {getattr(self.spider, 'name', 'Unknown')}")
+        return stats

     def _create_extension(self) -> ExtensionManager:
-        return ExtensionManager.create_instance(self)
+        """创建扩展管理器"""
+        extension = ExtensionManager.create_instance(self)
+        logger.debug(f"扩展管理器初始化完成,爬虫: {getattr(self.spider, 'name', 'Unknown')}")
+        return extension

     def _set_spider(self, spider: Spider):
+        """
+        设置爬虫配置和事件订阅
+        将爬虫的生命周期事件与订阅器绑定
+        """
+        # 订阅爬虫生命周期事件
         self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
         self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
+
+        # 合并爬虫自定义配置
         merge_settings(spider, self.settings)
+
+        logger.debug(f"爬虫 '{spider.name}' 配置合并完成")

     async def close(self, reason='finished') -> None:
+        """
+        关闭爬虫并清理资源(增强版)
+
+        确保只关闭一次,并处理所有清理操作
+        """
         async with self._close_lock:
             if self._closed:
                 return
+
             self._closed = True
-            await self.subscriber.notify(spider_closed)
-            if self.stats and self.spider:
-                self.stats.close_spider(spider=self.spider, reason=reason)
-                from crawlo.commands.stats import record_stats
-                record_stats(self)
+            self._end_time = time.time()
+
+            try:
+                # 通知爬虫关闭事件
+                if self.subscriber:
+                    await self.subscriber.notify(spider_closed)
+
+                # 统计数据收集
+                if self.stats and self.spider:
+                    self.stats.close_spider(spider=self.spider, reason=reason)
+                    # 记录统计数据
+                    try:
+                        from crawlo.commands.stats import record_stats
+                        record_stats(self)
+                    except ImportError:
+                        logger.debug("统计记录模块不存在,跳过统计记录")
+
+                logger.info(
+                    f"爬虫 '{getattr(self.spider, 'name', 'Unknown')}' 已关闭,"
+                    f"原因: {reason},耗时: {self._get_total_duration():.2f}秒"
+                )
+
+            except Exception as e:
+                logger.error(f"关闭爬虫时发生错误: {e}", exc_info=True)
+            finally:
+                # 确保资源清理
+                await self._cleanup_resources()
+
+    async def _cleanup_resources(self):
+        """清理所有资源"""
+        cleanup_tasks = []
+
+        # 引擎清理
+        if self.engine:
+            try:
+                cleanup_tasks.append(self.engine.close())
+            except AttributeError:
+                pass  # 引擎没有close方法
+
+        # 扩展清理
+        if self.extension:
+            try:
+                cleanup_tasks.append(self.extension.close())
+            except AttributeError:
+                pass
+
+        # 统计收集器清理
+        if self.stats:
+            try:
+                cleanup_tasks.append(self.stats.close())
+            except AttributeError:
+                pass
+
+        # 并发执行清理任务
+        if cleanup_tasks:
+            await asyncio.gather(*cleanup_tasks, return_exceptions=True)
+
+        logger.debug("资源清理完成")


 class CrawlerProcess:
     """
-    爬虫进程管理器,支持:
-    - 自动发现爬虫模块
-    - 通过 name 或类启动爬虫
-    - 并发控制
-    - 优雅关闭
+    爬虫进程管理器(增强版)
+
+    支持功能:
+    - 多爬虫并发调度和资源管理
+    - 自动模块发现和爬虫注册
+    - 智能并发控制和负载均衡
+    - 优雅关闭和信号处理
+    - 实时状态监控和统计
+    - 错误恢复和重试机制
+    - 大规模爬虫优化支持
+
+    使用示例:
+        # 基本用法
+        process = CrawlerProcess()
+        await process.crawl(MySpider)
+
+        # 多爬虫并发
+        await process.crawl([Spider1, Spider2, 'spider_name'])
+
+        # 自定义并发数
+        process = CrawlerProcess(max_concurrency=8)
     """

     def __init__(
         self,
         settings: Optional[SettingManager] = None,
         max_concurrency: Optional[int] = None,
-        spider_modules: Optional[List[str]] = None
+        spider_modules: Optional[List[str]] = None,
+        enable_monitoring: bool = True
     ):
+        # 基础配置
         self.settings: SettingManager = settings or self._get_default_settings()
         self.crawlers: Set[Crawler] = set()
         self._active_tasks: Set[asyncio.Task] = set()
-
-        # 自动发现并导入爬虫模块
-        if spider_modules:
-            self.auto_discover(spider_modules)
-
-        # 使用全局注册表的快照(避免后续导入影响)
-        self._spider_registry: Dict[str, Type[Spider]] = get_global_spider_registry()
-
+
+        # 上下文管理器
+        self.context = CrawlerContext()
+
+        # 并发控制配置
         self.max_concurrency: int = (
             max_concurrency
             or self.settings.get('MAX_RUNNING_SPIDERS')
             or self.settings.get('CONCURRENCY', 3)
         )
         self.semaphore = asyncio.Semaphore(self.max_concurrency)
+
+        # 监控配置
+        self.enable_monitoring = enable_monitoring
+        self._monitoring_task = None
+        self._shutdown_event = asyncio.Event()
+
+        # 自动发现并导入爬虫模块
+        if spider_modules:
+            self.auto_discover(spider_modules)
+
+        # 使用全局注册表的快照(避免后续导入影响)
+        self._spider_registry: Dict[str, Type[Spider]] = get_global_spider_registry()
+
+        # 性能监控
+        self._performance_stats = {
+            'total_requests': 0,
+            'successful_requests': 0,
+            'failed_requests': 0,
+            'memory_usage_mb': 0,
+            'cpu_usage_percent': 0
+        }

         # 注册信号量
         signal.signal(signal.SIGINT, self._shutdown)
         signal.signal(signal.SIGTERM, self._shutdown)
-        logger.info(f"CrawlerProcess 初始化完成,最大并行爬虫数: {self.max_concurrency}")
+
+        logger.info(
+            f"CrawlerProcess 初始化完成\n"
+            f" - 最大并行爬虫数: {self.max_concurrency}\n"
+            f" - 已注册爬虫数: {len(self._spider_registry)}\n"
+            f" - 监控启用: {self.enable_monitoring}"
+        )

+    async def start_monitoring(self):
+        """启动监控任务"""
+        if not self.enable_monitoring:
+            return
+
+        self._monitoring_task = asyncio.create_task(self._monitor_loop())
+        logger.debug("监控任务已启动")
+
+    async def stop_monitoring(self):
+        """停止监控任务"""
+        if self._monitoring_task and not self._monitoring_task.done():
+            self._monitoring_task.cancel()
+            try:
+                await self._monitoring_task
+            except asyncio.CancelledError:
+                pass
+            logger.debug("监控任务已停止")
+
+    async def _monitor_loop(self):
+        """监控循环,定期收集和报告状态"""
+        try:
+            while not self._shutdown_event.is_set():
+                await self._collect_performance_stats()
+
+                # 每30秒输出一次状态
+                stats = self.context.get_stats()
+                if stats['active_crawlers'] > 0:
+                    logger.info(
+                        f"爬虫状态: 活跃 {stats['active_crawlers']}, "
+                        f"完成 {stats['completed_crawlers']}, "
+                        f"失败 {stats['failed_crawlers']}, "
+                        f"成功率 {stats['success_rate']:.1f}%"
+                    )
+
+                await asyncio.sleep(30)  # 30秒间隔
+
+        except asyncio.CancelledError:
+            logger.debug("监控循环被取消")
+        except Exception as e:
+            logger.error(f"监控循环错误: {e}", exc_info=True)
+
+    async def _collect_performance_stats(self):
+        """收集性能统计数据"""
+        try:
+            import psutil
+            import os
+
+            process = psutil.Process(os.getpid())
+            memory_info = process.memory_info()
+
+            self._performance_stats.update({
+                'memory_usage_mb': round(memory_info.rss / 1024 / 1024, 2),
+                'cpu_usage_percent': round(process.cpu_percent(), 2)
+            })
+
+        except ImportError:
+            # psutil 不存在时跳过性能监控
+            pass
+        except Exception as e:
+            logger.debug(f"收集性能统计失败: {e}")
     @staticmethod
     def auto_discover(modules: List[str]):
-        """自动导入模块,触发 Spider 类定义和注册"""
+        """
+        自动导入模块,触发 Spider 类定义和注册(增强版)
+
+        支持递归扫描和错误恢复
+        """
         import importlib
         import pkgutil
+
+        discovered_count = 0
+        error_count = 0
+
         for module_name in modules:
             try:
                 module = importlib.import_module(module_name)
+
                 if hasattr(module, '__path__'):
+                    # 包模块,递归扫描
                     for _, name, _ in pkgutil.walk_packages(module.__path__, module.__name__ + "."):
-                        importlib.import_module(name)
+                        try:
+                            importlib.import_module(name)
+                            discovered_count += 1
+                        except Exception as sub_e:
+                            error_count += 1
+                            logger.warning(f"导入子模块 {name} 失败: {sub_e}")
                 else:
+                    # 单个模块
                     importlib.import_module(module_name)
+                    discovered_count += 1
+
                 logger.debug(f"已扫描模块: {module_name}")
+
             except Exception as e:
+                error_count += 1
                 logger.error(f"扫描模块 {module_name} 失败: {e}", exc_info=True)
+
+        logger.info(
+            f"模块发现完成: 成功 {discovered_count} 个,失败 {error_count} 个"
+        )

     # === 公共只读接口:避免直接访问 _spider_registry ===

@@ -163,333 +585,444 @@ class CrawlerProcess:
         return name in self._spider_registry

     async def crawl(self, spiders: Union[Type[Spider], str, List[Union[Type[Spider], str]]]):
-        """启动一个或多个爬虫"""
+        """
+        启动一个或多个爬虫(增强版)
+
+        增强功能:
+        - 智能并发控制
+        - 实时监控和统计
+        - 错误恢复和重试
+        - 优雅关闭处理
+        """
+        # 阶段 1: 预处理和验证
         spider_classes_to_run = self._resolve_spiders_to_run(spiders)
         total = len(spider_classes_to_run)

         if total == 0:
             raise ValueError("至少需要提供一个爬虫类或名称")

-        # 按类名排序,保证启动顺序可预测
-        spider_classes_to_run.sort(key=lambda cls: cls.__name__.lower())
-        logger.info(f"启动 {total} 个爬虫.")
+        # 阶段 2: 初始化上下文和监控
+        for _ in range(total):
+            self.context.increment_total()
+
+        # 启动监控任务
+        await self.start_monitoring()
+
+        try:
+            # 阶段 3: 按类名排序,保证启动顺序可预测
+            spider_classes_to_run.sort(key=lambda cls: cls.__name__.lower())
+
+            logger.info(
+                f"开始启动 {total} 个爬虫\n"
+                f" - 最大并发数: {self.max_concurrency}\n"
+                f" - 爬虫列表: {[cls.__name__ for cls in spider_classes_to_run]}"
+            )
+
+            # 阶段 4: 流式启动所有爬虫任务
+            tasks = [
+                asyncio.create_task(
+                    self._run_spider_with_limit(spider_cls, index + 1, total),
+                    name=f"spider-{spider_cls.__name__}-{index+1}"
+                )
+                for index, spider_cls in enumerate(spider_classes_to_run)
+            ]

-        # 流式启动
-        tasks = [
-            asyncio.create_task(self._run_spider_with_limit(spider_cls, index + 1, total))
-            for index, spider_cls in enumerate(spider_classes_to_run)
-        ]
+            # 阶段 5: 等待所有任务完成(失败不中断)
+            results = await asyncio.gather(*tasks, return_exceptions=True)

-        # 等待完成(失败不中断)
-        results = await asyncio.gather(*tasks, return_exceptions=True)
-        failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
-        if failed:
-            logger.error(f"共 {len(failed)} 个爬虫执行异常: {[spider_classes_to_run[i].__name__ for i in failed]}")
+            # 阶段 6: 统计异常和结果
+            failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
+            successful = total - len(failed)
+
+            if failed:
+                failed_spiders = [spider_classes_to_run[i].__name__ for i in failed]
+                logger.error(
+                    f"爬虫执行结果: 成功 {successful}/{total},失败 {len(failed)}/{total}\n"
+                    f" - 失败爬虫: {failed_spiders}"
+                )
+
+                # 记录详细错误信息
+                for i in failed:
+                    error = results[i]
+                    logger.error(f"爬虫 {spider_classes_to_run[i].__name__} 错误详情: {error}")
+            else:
+                logger.info(f"所有 {total} 个爬虫均成功完成! 🎉")
+
+            # 返回统计结果
+            return {
+                'total': total,
+                'successful': successful,
+                'failed': len(failed),
+                'success_rate': (successful / total) * 100 if total > 0 else 0,
+                'context_stats': self.context.get_stats()
+            }
+
+        finally:
+            # 阶段 7: 清理和关闭
+            await self.stop_monitoring()
+            await self._cleanup_process()

+    async def _cleanup_process(self):
+        """清理进程资源"""
+        try:
+            # 等待所有活跃爬虫完成
+            if self.crawlers:
+                close_tasks = [crawler.close() for crawler in self.crawlers]
+                await asyncio.gather(*close_tasks, return_exceptions=True)
+                self.crawlers.clear()
+
+            # 清理活跃任务
+            if self._active_tasks:
+                for task in list(self._active_tasks):
+                    if not task.done():
+                        task.cancel()
+                await asyncio.gather(*self._active_tasks, return_exceptions=True)
+                self._active_tasks.clear()
+
+            logger.debug("进程资源清理完成")
+
+        except Exception as e:
+            logger.error(f"清理进程资源时发生错误: {e}", exc_info=True)
+
+    def get_process_stats(self) -> Dict[str, Any]:
+        """获取进程统计信息"""
+        context_stats = self.context.get_stats()
+
+        return {
+            'context': context_stats,
+            'performance': self._performance_stats.copy(),
+            'crawlers': {
+                'total_registered': len(self._spider_registry),
+                'active_crawlers': len(self.crawlers),
+                'max_concurrency': self.max_concurrency
+            },
+            'registry': {
+                'spider_names': list(self._spider_registry.keys()),
+                'spider_classes': [cls.__name__ for cls in self._spider_registry.values()]
+            }
+        }
     def _resolve_spiders_to_run(
         self,
         spiders_input: Union[Type[Spider], str, List[Union[Type[Spider], str]]]
     ) -> List[Type[Spider]]:
-        """解析输入为爬虫类列表"""
+        """
+        解析输入为爬虫类列表
+
+        支持各种输入格式并验证唯一性
+        """
         inputs = self._normalize_inputs(spiders_input)
         seen_spider_names: Set[str] = set()
         spider_classes: List[Type[Spider]] = []
-
+
         for item in inputs:
-            spider_cls = self._resolve_spider_class(item)
-            spider_name = spider_cls.name
+            try:
+                spider_cls = self._resolve_spider_class(item)
+                spider_name = getattr(spider_cls, 'name', None)
+
+                if not spider_name:
+                    raise ValueError(f"爬虫类 {spider_cls.__name__} 缺少 'name' 属性")

-            if spider_name in seen_spider_names:
-                raise ValueError(f"本次运行中爬虫名称 '{spider_name}' 重复。")
+                if spider_name in seen_spider_names:
+                    raise ValueError(
+                        f"本次运行中爬虫名称 '{spider_name}' 重复。\n"
+                        f"请确保每个爬虫的 name 属性在本次运行中唯一。"
+                    )

-            seen_spider_names.add(spider_name)
-            spider_classes.append(spider_cls)
+                seen_spider_names.add(spider_name)
+                spider_classes.append(spider_cls)
+
+                logger.debug(f"解析爬虫成功: {item} -> {spider_cls.__name__} (name='{spider_name}')")
+
+            except Exception as e:
+                logger.error(f"解析爬虫失败: {item} - {e}")
+                raise

         return spider_classes

     @staticmethod
     def _normalize_inputs(spiders_input) -> List[Union[Type[Spider], str]]:
-        """标准化输入为列表"""
+        """
+        标准化输入为列表
+
+        支持更多输入类型并提供更好的错误信息
+        """
         if isinstance(spiders_input, (type, str)):
             return [spiders_input]
-        elif isinstance(spiders_input, (list, tuple)):
-            return list(spiders_input)
+        elif isinstance(spiders_input, (list, tuple, set)):
+            spider_list = list(spiders_input)
+            if not spider_list:
+                raise ValueError("爬虫列表不能为空")
+            return spider_list
         else:
-            raise TypeError("spiders 必须是爬虫类、name 字符串,或它们的列表/元组")
+            raise TypeError(
+                f"spiders 参数类型不支持: {type(spiders_input)}\n"
+                f"支持的类型: Spider类、name字符串,或它们的列表/元组/集合"
+            )

     def _resolve_spider_class(self, item: Union[Type[Spider], str]) -> Type[Spider]:
-        """解析单个输入项为爬虫类"""
+        """
+        解析单个输入项为爬虫类
+
+        提供更好的错误提示和调试信息
+        """
         if isinstance(item, type) and issubclass(item, Spider):
+            # 直接是 Spider 类
             return item
         elif isinstance(item, str):
+            # 是字符串名称,需要查找注册表
             spider_cls = self._spider_registry.get(item)
             if not spider_cls:
-                raise ValueError(f"未找到名为 '{item}' 的爬虫。")
+                available_spiders = list(self._spider_registry.keys())
+                raise ValueError(
+                    f"未找到名为 '{item}' 的爬虫。\n"
+                    f"已注册的爬虫: {available_spiders}\n"
+                    f"请检查爬虫名称是否正确,或者确保爬虫已被正确导入和注册。"
+                )
             return spider_cls
         else:
-            raise TypeError(f"无效类型 {type(item)}。必须是 Spider 类或字符串 name。")
+            raise TypeError(
+                f"无效类型 {type(item)}: {item}\n"
+                f"必须是 Spider 类或字符串 name。\n"
+                f"示例: MySpider 或 'my_spider'"
+            )

     async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
-        """受信号量限制的爬虫运行函数"""
+        """
+        受信号量限制的爬虫运行函数
+
+        包含增强的错误处理和监控功能
+        """
         task = asyncio.current_task()
-        self._active_tasks.add(task)
+        crawler = None
+
         try:
+            # 注册任务
+            if task:
+                self._active_tasks.add(task)
+
+            # 获取并发许可
             await self.semaphore.acquire()
-            logger.info(f"[{seq}/{total}] 启动爬虫: {spider_cls.__name__}")
-            crawler = Crawler(spider_cls, self.settings)
+
+            start_msg = f"[{seq}/{total}] 启动爬虫: {spider_cls.__name__}"
+            logger.info(start_msg)
+
+            # 创建并运行爬虫
+            crawler = Crawler(spider_cls, self.settings, self.context)
             self.crawlers.add(crawler)
+
+            # 记录启动时间
+            start_time = time.time()
+
+            # 运行爬虫
             await crawler.crawl()
-            logger.info(f"[{seq}/{total}] 爬虫完成: {spider_cls.__name__}")
+
+            # 计算运行时间
+            duration = time.time() - start_time
+
+            end_msg = (
+                f"[{seq}/{total}] 爬虫完成: {spider_cls.__name__}, "
+                f"耗时: {duration:.2f}秒"
+            )
+            logger.info(end_msg)
+
+            # 记录成功统计
+            self._performance_stats['successful_requests'] += 1
+
         except Exception as e:
-            logger.error(f"爬虫 {spider_cls.__name__} 执行失败: {e}", exc_info=True)
+            # 记录失败统计
+            self._performance_stats['failed_requests'] += 1
+
+            error_msg = f"爬虫 {spider_cls.__name__} 执行失败: {e}"
+            logger.error(error_msg, exc_info=True)
+
+            # 将错误信息记录到上下文
+            if hasattr(self, 'context'):
+                self.context.increment_failed(error_msg)
+
             raise
         finally:
-            if task in self._active_tasks:
-                self._active_tasks.remove(task)
-            self.semaphore.release()
+            # 清理资源
+            try:
+                if crawler and crawler in self.crawlers:
+                    self.crawlers.remove(crawler)
+
+                if task and task in self._active_tasks:
+                    self._active_tasks.remove(task)
+
+                self.semaphore.release()
+
+            except Exception as cleanup_error:
+                logger.warning(f"清理资源时发生错误: {cleanup_error}")

     def _shutdown(self, _signum, _frame):
-        """优雅关闭信号处理"""
-        logger.warning("收到关闭信号,正在停止所有爬虫...")
+        """
+        优雅关闭信号处理(增强版)
+
+        提供更好的关闭体验和资源清理
+        """
+        signal_name = {signal.SIGINT: 'SIGINT', signal.SIGTERM: 'SIGTERM'}.get(_signum, str(_signum))
+        logger.warning(f"收到关闭信号 {signal_name},正在停止所有爬虫...")
+
+        # 设置关闭事件
+        if hasattr(self, '_shutdown_event'):
+            self._shutdown_event.set()
+
+        # 停止所有爬虫引擎
         for crawler in list(self.crawlers):
             if crawler.engine:
                 crawler.engine.running = False
                 crawler.engine.normal = False
+                logger.debug(f"已停止爬虫引擎: {getattr(crawler.spider, 'name', 'Unknown')}")
+
+        # 创建关闭任务
         asyncio.create_task(self._wait_for_shutdown())
+
+        logger.info("关闭指令已发送,等待爬虫完成当前任务...")

     async def _wait_for_shutdown(self):
-        """等待所有活跃任务完成"""
-        pending = [t for t in self._active_tasks if not t.done()]
-        if pending:
-            logger.info(f"等待 {len(pending)} 个活跃任务完成...")
-            await asyncio.gather(*pending, return_exceptions=True)
-        logger.info("所有爬虫已优雅关闭")
+        """
+        等待所有活跃任务完成(增强版)
+
+        提供更好的关闭时间控制和进度反馈
+        """
+        try:
+            # 停止监控任务
+            await self.stop_monitoring()
+
+            # 等待活跃任务完成
+            pending = [t for t in self._active_tasks if not t.done()]
+
+            if pending:
+                logger.info(
+                    f"等待 {len(pending)} 个活跃任务完成..."
+                    f"(最大等待时间: 30秒)"
+                )
+
+                # 设置超时时间
+                try:
+                    await asyncio.wait_for(
+                        asyncio.gather(*pending, return_exceptions=True),
+                        timeout=30.0
+                    )
+                except asyncio.TimeoutError:
+                    logger.warning("部分任务超时,强制取消中...")
+
+                    # 强制取消超时任务
+                    for task in pending:
+                        if not task.done():
+                            task.cancel()
+
+                    # 等待取消完成
+                    await asyncio.gather(*pending, return_exceptions=True)
+
+            # 最终清理
+            await self._cleanup_process()
+
+            # 输出最终统计
+            final_stats = self.context.get_stats()
+            logger.info(
+                f"所有爬虫已优雅关闭 👋\n"
+                f" - 总计爬虫: {final_stats['total_crawlers']}\n"
+                f" - 成功完成: {final_stats['completed_crawlers']}\n"
+                f" - 失败数量: {final_stats['failed_crawlers']}\n"
+                f" - 成功率: {final_stats['success_rate']:.1f}%\n"
+                f" - 总运行时间: {final_stats['duration_seconds']}秒"
+            )
+
+        except Exception as e:
+            logger.error(f"关闭过程中发生错误: {e}", exc_info=True)

     @classmethod
     def _get_default_settings(cls) -> SettingManager:
-        """加载默认配置"""
+        """
+        加载默认配置(增强版)
+
+        提供更好的错误处理和降级策略
+        """
         try:
-            return get_settings()
+            settings = get_settings()
+            logger.debug("成功加载默认配置")
+            return settings
         except Exception as e:
-            logger.warning(f"无法加载默认配置: {e}")
+            logger.warning(f"无法加载默认配置: {e},使用空配置")
             return SettingManager()

-# #!/usr/bin/python
-# # -*- coding: UTF-8 -*-
-# import asyncio
-# import signal
-# from typing import Type, Optional, Set, List
-#
-# from crawlo.spider import Spider
-# from crawlo.core.engine import Engine
-# from crawlo.utils.log import get_logger
-# from crawlo.subscriber import Subscriber
-# from crawlo.extension import ExtensionManager
-# from crawlo.exceptions import SpiderTypeError
-# from crawlo.stats_collector import StatsCollector
-# from crawlo.event import spider_opened, spider_closed
-# from crawlo.settings.setting_manager import SettingManager
-# from crawlo.utils.project import merge_settings, get_settings
-#
-#
-# logger = get_logger(__name__)
-#
-#
-# class Crawler:
-#     """单个爬虫运行实例,绑定 Spider 与引擎"""
-#
-#     def __init__(self, spider_cls: Type[Spider], settings: SettingManager):
-#         self.spider_cls = spider_cls
-#         self.spider: Optional[Spider] = None
-#         self.engine: Optional[Engine] = None
-#         self.stats: Optional[StatsCollector] = None
-#         self.subscriber: Optional[Subscriber] = None
-#         self.extension: Optional[ExtensionManager] = None
-#         self.settings: SettingManager = settings.copy()
-#         self._closed = False  # 新增状态
-#         self._close_lock = asyncio.Lock()
-#
-#     async def crawl(self):
-#         """启动爬虫核心流程"""
-#         self.subscriber = self._create_subscriber()
-#         self.spider = self._create_spider()
-#         self.engine = self._create_engine()
-#         self.stats = self._create_stats()
-#         self.extension = self._create_extension()
-#
-#         await self.engine.start_spider(self.spider)
-#
-#     @staticmethod
-#     def _create_subscriber() -> Subscriber:
-#         return Subscriber()
-#
-#     def _create_spider(self) -> Spider:
-#         spider = self.spider_cls.create_instance(self)
-#
-#         # --- 关键属性检查 ---
-#         if not getattr(spider, 'name', None):
-#             raise AttributeError(f"爬虫类 '{self.spider_cls.__name__}' 必须定义 'name' 属性。")
-#
-#         if not callable(getattr(spider, 'start_requests', None)):
-#             raise AttributeError(f"爬虫 '{spider.name}' 必须实现可调用的 'start_requests' 方法。")
-#
-#         start_urls = getattr(spider, 'start_urls', [])
-#         if isinstance(start_urls, str):
-#             raise TypeError(f"爬虫 '{spider.name}' 的 'start_urls' 必须是列表或元组,不能是字符串。")
-#
-#         if not callable(getattr(spider, 'parse', None)):
-#             logger.warning(
-#                 f"爬虫 '{spider.name}' 未定义 'parse' 方法。请确保所有 Request 都指定了回调函数,否则响应将被忽略。")
-#
-#         self._set_spider(spider)
-#         return spider
-#
-#     def _create_engine(self) -> Engine:
-#         engine = Engine(self)
-#         engine.engine_start()
-#         return engine
-#
-#     def _create_stats(self) -> StatsCollector:
-#         return StatsCollector(self)
-#
-#     def _create_extension(self) -> ExtensionManager:
-#         return ExtensionManager.create_instance(self)
-#
-#     def _set_spider(self, spider: Spider):
-#         self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
-#         self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
-#         merge_settings(spider, self.settings)
-#
-#     async def close(self, reason='finished') -> None:
-#         async with self._close_lock:
-#             if self._closed:
-#                 return
-#             self._closed = True
-#             await self.subscriber.notify(spider_closed)
-#             if self.stats and self.spider:
-#                 self.stats.close_spider(spider=self.spider, reason=reason)
-#
-#
-# class CrawlerProcess:
-#     """
-#     爬虫进程管理器,支持多爬虫并发调度、信号量控制、实时日志与优雅关闭
-#     """
-#
-#     def __init__(self, settings: Optional[SettingManager] = None, max_concurrency: Optional[int] = None):
-#         self.settings: SettingManager = settings or self._get_default_settings()
-#         self.crawlers: Set[Crawler] = set()
-#         self._active_tasks: Set[asyncio.Task] = set()
-#
-#         # 使用专用配置,降级使用 CONCURRENCY
-#         self.max_concurrency: int = (
-#             max_concurrency
-#             or self.settings.get('MAX_RUNNING_SPIDERS')
-#             or self.settings.get('CONCURRENCY', 3)
-#         )
-#         self.semaphore = asyncio.Semaphore(self.max_concurrency)
-#
-#         # 注册信号量
-#         signal.signal(signal.SIGINT, self._shutdown)
-#         signal.signal(signal.SIGTERM, self._shutdown)
-#         logger.info(f"CrawlerProcess 初始化完成,最大并行爬虫数: {self.max_concurrency}")
-#
-#     async def crawl(self, spiders):
-#         """
-#         启动一个或多个爬虫,流式调度,支持实时进度反馈
-#         """
-#         spider_classes = self._normalize_spiders(spiders)
-#         total = len(spider_classes)
-#
-#         if total == 0:
-#             raise ValueError("至少需要提供一个爬虫类")
-#
-#         # 按名称排序
-#         spider_classes.sort(key=lambda cls: cls.__name__.lower())
-#
-#         logger.info(f"启动 {total} 个爬虫.")
-#
-#         # 流式启动所有爬虫任务
-#         tasks = [
-#             asyncio.create_task(self._run_spider_with_limit(spider_cls, index + 1, total))
-#             for index, spider_cls in enumerate(spider_classes)
-#         ]
-#
-#         # 等待所有任务完成(失败不中断)
-#         results = await asyncio.gather(*tasks, return_exceptions=True)
-#
-#         # 统计异常
-#         failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
-#         if failed:
-#             logger.error(f"共 {len(failed)} 个爬虫执行异常: {[spider_classes[i].__name__ for i in failed]}")
-#
-#     @staticmethod
-#     def _normalize_spiders(spiders) -> List[Type[Spider]]:
-#         """标准化输入为爬虫类列表"""
-#         if isinstance(spiders, type) and issubclass(spiders, Spider):
-#             return [spiders]
-#         elif isinstance(spiders, (list, tuple)):
-#             return list(spiders)
-#         else:
-#             raise TypeError("spiders 必须是爬虫类或爬虫类列表/元组")
-#
-#     async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
-#         """
-#         受信号量限制的爬虫运行函数,带进度日志
-#         """
-#         task = asyncio.current_task()
-#         self._active_tasks.add(task)
-#
-#         try:
-#             # 获取并发许可
-#             await self.semaphore.acquire()
-#
-#             start_msg = f"[{seq}/{total}] 启动爬虫: {spider_cls.__name__}"
-#             logger.info(start_msg)
-#
-#             # 创建并运行爬虫
-#             crawler = self._create_crawler(spider_cls)
-#             self.crawlers.add(crawler)
-#             await crawler.crawl()
-#
-#             end_msg = f"[{seq}/{total}] 爬虫完成: {spider_cls.__name__}"
-#             logger.info(end_msg)
-#
-#         except Exception as e:
-#             logger.error(f"爬虫 {spider_cls.__name__} 执行失败: {e}", exc_info=True)
-#             raise
-#         finally:
-#             if task in self._active_tasks:
-#                 self._active_tasks.remove(task)
-#             self.semaphore.release()  # 必须释放
-#
-#     def _create_crawler(self, spider_cls: Type[Spider]) -> Crawler:
-#         """创建爬虫实例"""
-#         if isinstance(spider_cls, str):
-#             raise SpiderTypeError(f"不支持字符串形式的爬虫: {spider_cls}")
-#         return Crawler(spider_cls, self.settings)
-#
-#     def _shutdown(self, _signum, _frame):
-#         """优雅关闭信号处理"""
-#         logger.warning("收到关闭信号,正在停止所有爬虫...")
-#         for crawler in list(self.crawlers):
-#             if crawler.engine:
-#                 crawler.engine.running = False
-#                 crawler.engine.normal = False
-#         asyncio.create_task(self._wait_for_shutdown())
-#
-#     async def _wait_for_shutdown(self):
-#         """等待所有活跃任务完成"""
-#         pending = [t for t in self._active_tasks if not t.done()]
-#         if pending:
-#             logger.info(f"等待 {len(pending)} 个活跃任务完成...")
-#             await asyncio.gather(*pending, return_exceptions=True)
-#         logger.info("所有爬虫已优雅关闭")
-#
-#     @classmethod
-#     def _get_default_settings(cls) -> SettingManager:
-#         """加载默认配置"""
-#         try:
-#             return get_settings()
-#         except Exception as e:
-#             logger.warning(f"无法加载默认配置: {e}")
-#             return SettingManager()
+
+# === 工具函数 ===
+
+def create_crawler_with_optimizations(
+    spider_cls: Type[Spider],
+    settings: Optional[SettingManager] = None,
+    **optimization_kwargs
+) -> Crawler:
+    """
+    创建优化的爬虫实例
+
+    :param spider_cls: 爬虫类
+    :param settings: 设置管理器
+    :param optimization_kwargs: 优化参数
+    :return: 爬虫实例
+    """
+    if settings is None:
+        settings = SettingManager()
+
+    # 应用优化配置
+    for key, value in optimization_kwargs.items():
+        settings.set(key, value)
+
+    context = CrawlerContext()
+    return Crawler(spider_cls, settings, context)
+
+
+def create_process_with_large_scale_config(
+    config_type: str = 'balanced',
+    concurrency: int = 16,
+    **kwargs
+) -> CrawlerProcess:
+    """
+    创建支持大规模优化的进程管理器
+
+    :param config_type: 配置类型 ('conservative', 'balanced', 'aggressive', 'memory_optimized')
+    :param concurrency: 并发数
+    :param kwargs: 其他参数
+    :return: 进程管理器
+    """
+    try:
+        from crawlo.utils.large_scale_config import LargeScaleConfig
+
+        # 获取优化配置
+        config_methods = {
+            'conservative': LargeScaleConfig.conservative_config,
+            'balanced': LargeScaleConfig.balanced_config,
+            'aggressive': LargeScaleConfig.aggressive_config,
+            'memory_optimized': LargeScaleConfig.memory_optimized_config
+        }
+
+        if config_type not in config_methods:
+            logger.warning(f"未知的配置类型: {config_type},使用默认配置")
+            settings = SettingManager()
+        else:
+            config = config_methods[config_type](concurrency)
+            settings = SettingManager()
+            settings.update(config)
+
+        return CrawlerProcess(
+            settings=settings,
+            max_concurrency=concurrency,
+            **kwargs
+        )
+
+    except ImportError:
+        logger.warning("大规模配置模块不存在,使用默认配置")
+        return CrawlerProcess(max_concurrency=concurrency, **kwargs)
+
+
+# === 导出接口 ===
+
+__all__ = [
+    'Crawler',
+    'CrawlerProcess',
+    'CrawlerContext',
+    'create_crawler_with_optimizations',
+    'create_process_with_large_scale_config'
+]
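
The diff above changes how a run is driven and observed: CrawlerProcess.crawl() now accepts spider classes or registered names, returns a result dictionary, and the process exposes get_process_stats(). The sketch below is a minimal, hypothetical usage example assembled only from the signatures and docstrings added in this diff; the spider definition, the crawlo.spider.Spider and crawlo.network.request.Request import paths, and the exact Request/Spider base-class behavior are assumptions rather than documented API.

# Hypothetical usage sketch for the 1.1.2 crawler API (derived from this diff, not from package docs).
import asyncio

from crawlo.crawler import CrawlerProcess      # exported via the new __all__ in this diff
from crawlo.spider import Spider               # import path assumed from the diff's relative imports
from crawlo.network.request import Request     # assumed path; the file list only shows crawlo/network/request.py


class DemoSpider(Spider):
    name = "demo"                               # required: checked in Crawler._create_spider
    start_urls = ["https://example.com"]        # must be a list/tuple, not a string

    def start_requests(self):
        # start_requests must be callable; Request's exact signature is an assumption here
        for url in self.start_urls:
            yield Request(url=url, callback=self.parse)

    def parse(self, response):
        # without a parse method the new code only logs a warning
        yield {"url": response.url}


async def main():
    # max_concurrency falls back to the MAX_RUNNING_SPIDERS / CONCURRENCY settings when omitted
    process = CrawlerProcess(max_concurrency=2)
    summary = await process.crawl([DemoSpider])  # new in 1.1.2: returns a summary dict
    print(summary["successful"], "of", summary["total"], "spiders finished")
    print(process.get_process_stats()["crawlers"])


if __name__ == "__main__":
    asyncio.run(main())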