crawlo 1.3.2__py3-none-any.whl → 1.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (105)
  1. crawlo/__init__.py +24 -0
  2. crawlo/__version__.py +1 -1
  3. crawlo/commands/run.py +58 -32
  4. crawlo/core/__init__.py +44 -0
  5. crawlo/core/engine.py +119 -45
  6. crawlo/core/scheduler.py +4 -3
  7. crawlo/crawler.py +603 -1133
  8. crawlo/downloader/aiohttp_downloader.py +4 -2
  9. crawlo/extension/__init__.py +1 -1
  10. crawlo/extension/logging_extension.py +23 -7
  11. crawlo/factories/__init__.py +28 -0
  12. crawlo/factories/base.py +69 -0
  13. crawlo/factories/crawler.py +104 -0
  14. crawlo/factories/registry.py +85 -0
  15. crawlo/filters/aioredis_filter.py +25 -2
  16. crawlo/framework.py +292 -0
  17. crawlo/initialization/__init__.py +40 -0
  18. crawlo/initialization/built_in.py +426 -0
  19. crawlo/initialization/context.py +142 -0
  20. crawlo/initialization/core.py +194 -0
  21. crawlo/initialization/phases.py +149 -0
  22. crawlo/initialization/registry.py +146 -0
  23. crawlo/items/base.py +2 -1
  24. crawlo/logging/__init__.py +38 -0
  25. crawlo/logging/config.py +97 -0
  26. crawlo/logging/factory.py +129 -0
  27. crawlo/logging/manager.py +112 -0
  28. crawlo/middleware/middleware_manager.py +1 -1
  29. crawlo/middleware/offsite.py +1 -1
  30. crawlo/mode_manager.py +26 -1
  31. crawlo/pipelines/pipeline_manager.py +2 -1
  32. crawlo/project.py +76 -46
  33. crawlo/queue/pqueue.py +11 -5
  34. crawlo/queue/queue_manager.py +143 -19
  35. crawlo/queue/redis_priority_queue.py +69 -49
  36. crawlo/settings/default_settings.py +110 -14
  37. crawlo/settings/setting_manager.py +29 -13
  38. crawlo/spider/__init__.py +34 -16
  39. crawlo/stats_collector.py +17 -3
  40. crawlo/task_manager.py +112 -3
  41. crawlo/templates/project/settings.py.tmpl +103 -202
  42. crawlo/templates/project/settings_distributed.py.tmpl +122 -135
  43. crawlo/templates/project/settings_gentle.py.tmpl +149 -43
  44. crawlo/templates/project/settings_high_performance.py.tmpl +127 -90
  45. crawlo/templates/project/settings_minimal.py.tmpl +46 -15
  46. crawlo/templates/project/settings_simple.py.tmpl +138 -75
  47. crawlo/templates/project/spiders/__init__.py.tmpl +5 -1
  48. crawlo/templates/run.py.tmpl +10 -14
  49. crawlo/templates/spiders_init.py.tmpl +10 -0
  50. crawlo/tools/network_diagnostic.py +365 -0
  51. crawlo/utils/class_loader.py +26 -0
  52. crawlo/utils/error_handler.py +76 -35
  53. crawlo/utils/log.py +41 -144
  54. crawlo/utils/redis_connection_pool.py +43 -6
  55. crawlo/utils/request_serializer.py +8 -1
  56. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/METADATA +120 -14
  57. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/RECORD +104 -45
  58. tests/authenticated_proxy_example.py +2 -2
  59. tests/baidu_performance_test.py +109 -0
  60. tests/baidu_test.py +60 -0
  61. tests/comprehensive_framework_test.py +213 -0
  62. tests/comprehensive_test.py +82 -0
  63. tests/comprehensive_testing_summary.md +187 -0
  64. tests/debug_configure.py +70 -0
  65. tests/debug_framework_logger.py +85 -0
  66. tests/debug_log_levels.py +64 -0
  67. tests/distributed_test.py +67 -0
  68. tests/distributed_test_debug.py +77 -0
  69. tests/final_command_test_report.md +0 -0
  70. tests/final_comprehensive_test.py +152 -0
  71. tests/final_validation_test.py +183 -0
  72. tests/framework_performance_test.py +203 -0
  73. tests/optimized_performance_test.py +212 -0
  74. tests/performance_comparison.py +246 -0
  75. tests/queue_blocking_test.py +114 -0
  76. tests/queue_test.py +90 -0
  77. tests/scrapy_comparison/ofweek_scrapy.py +139 -0
  78. tests/scrapy_comparison/scrapy_test.py +134 -0
  79. tests/simple_command_test.py +120 -0
  80. tests/simple_crawlo_test.py +128 -0
  81. tests/simple_log_test.py +58 -0
  82. tests/simple_optimization_test.py +129 -0
  83. tests/simple_spider_test.py +50 -0
  84. tests/simple_test.py +48 -0
  85. tests/test_all_commands.py +231 -0
  86. tests/test_batch_processor.py +179 -0
  87. tests/test_component_factory.py +175 -0
  88. tests/test_controlled_spider_mixin.py +80 -0
  89. tests/test_enhanced_error_handler_comprehensive.py +246 -0
  90. tests/test_factories.py +253 -0
  91. tests/test_framework_logger.py +67 -0
  92. tests/test_framework_startup.py +65 -0
  93. tests/test_large_scale_config.py +113 -0
  94. tests/test_large_scale_helper.py +236 -0
  95. tests/test_mode_change.py +73 -0
  96. tests/test_mode_consistency.py +1 -1
  97. tests/test_performance_monitor.py +116 -0
  98. tests/test_queue_empty_check.py +42 -0
  99. tests/untested_features_report.md +139 -0
  100. tests/verify_debug.py +52 -0
  101. tests/verify_log_fix.py +112 -0
  102. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +0 -82
  103. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/WHEEL +0 -0
  104. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/entry_points.txt +0 -0
  105. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/top_level.txt +0 -0
crawlo/crawler.py CHANGED
@@ -1,1169 +1,639 @@
1
1
  #!/usr/bin/python
2
2
  # -*- coding: UTF-8 -*-
3
3
  """
4
- Crawlo Crawler Module
5
- ====================
6
- Provides crawler process management and runtime core functionality.
7
-
8
- Core Components:
9
- - Crawler: Single crawler runtime instance, managing Spider and engine lifecycle
10
- - CrawlerProcess: Crawler process manager, supporting multi-crawler concurrent scheduling and resource management
11
-
12
- Features:
13
- - Intelligent concurrency control and resource management
14
- - Graceful shutdown and signal handling
15
- - Statistics monitoring and performance tracking
16
- - Automatic module discovery and registration
17
- - Error recovery and retry mechanism
18
- - Large-scale crawler optimization support
19
-
20
- Example Usage:
21
- # Single crawler run
22
- crawler = Crawler(MySpider, settings)
23
- await crawler.crawl()
24
-
25
- # Multi-crawler concurrent management
26
- process = CrawlerProcess()
27
- await process.crawl([Spider1, Spider2])
4
+ 重构后的Crawler系统
5
+ ==================
6
+
7
+ 设计原则:
8
+ 1. 单一职责 - 每个类只负责一个明确的功能
9
+ 2. 依赖注入 - 通过工厂创建组件,便于测试
10
+ 3. 状态管理 - 清晰的状态转换和生命周期
11
+ 4. 错误处理 - 优雅的错误处理和恢复机制
28
12
  """
29
- from __future__ import annotations
13
+
30
14
  import asyncio
31
- import signal
32
15
  import time
33
- import threading
34
- from typing import Type, Optional, Set, List, Union, Dict, Any
35
- from .spider import Spider, get_global_spider_registry
36
- from .core.engine import Engine
37
- from .subscriber import Subscriber
38
- from .extension import ExtensionManager
39
- from crawlo.utils.log import get_logger
40
- from .stats_collector import StatsCollector
41
- from .event import spider_opened, spider_closed
42
- from .settings.setting_manager import SettingManager
43
- from crawlo.project import merge_settings, get_settings
44
-
45
- logger = get_logger(__name__)
46
-
47
-
48
- class CrawlerContext:
49
- """
50
- Crawler context manager
51
- Provides shared state and resource management
52
- """
53
-
54
- def __init__(self):
55
- self.start_time = time.time()
56
- self.total_crawlers = 0
57
- self.active_crawlers = 0
58
- self.completed_crawlers = 0
59
- self.failed_crawlers = 0
60
- self.error_log = []
61
- self._lock = threading.RLock()
62
-
63
- def increment_total(self):
64
- with self._lock:
65
- self.total_crawlers += 1
66
-
67
- def increment_active(self):
68
- with self._lock:
69
- self.active_crawlers += 1
70
-
71
- def decrement_active(self):
72
- with self._lock:
73
- self.active_crawlers -= 1
74
-
75
- def increment_completed(self):
76
- with self._lock:
77
- self.completed_crawlers += 1
78
-
79
- def increment_failed(self, error: str):
80
- with self._lock:
81
- self.failed_crawlers += 1
82
- self.error_log.append({
83
- 'timestamp': time.time(),
84
- 'error': error
85
- })
86
-
87
- def get_stats(self) -> Dict[str, Any]:
88
- with self._lock:
89
- duration = time.time() - self.start_time
90
- return {
91
- 'total_crawlers': self.total_crawlers,
92
- 'active_crawlers': self.active_crawlers,
93
- 'completed_crawlers': self.completed_crawlers,
94
- 'failed_crawlers': self.failed_crawlers,
95
- 'success_rate': (self.completed_crawlers / max(1, self.total_crawlers)) * 100,
96
- 'duration_seconds': round(duration, 2),
97
- 'error_count': len(self.error_log)
98
- }
16
+ from contextlib import asynccontextmanager
17
+ from dataclasses import dataclass
18
+ from enum import Enum
19
+ from typing import Optional, Type, Dict, Any, List
20
+
21
+ from crawlo.factories import get_component_registry
22
+ from crawlo.initialization import initialize_framework, is_framework_ready
23
+ from crawlo.logging import get_logger
24
+
25
+
26
+ class CrawlerState(Enum):
27
+ """Crawler状态枚举"""
28
+ CREATED = "created"
29
+ INITIALIZING = "initializing"
30
+ READY = "ready"
31
+ RUNNING = "running"
32
+ CLOSING = "closing"
33
+ CLOSED = "closed"
34
+ ERROR = "error"
35
+
36
+
37
+ @dataclass
38
+ class CrawlerMetrics:
39
+ """Crawler性能指标"""
40
+ start_time: Optional[float] = None
41
+ end_time: Optional[float] = None
42
+ initialization_duration: float = 0.0
43
+ crawl_duration: float = 0.0
44
+ request_count: int = 0
45
+ success_count: int = 0
46
+ error_count: int = 0
47
+
48
+ def get_total_duration(self) -> float:
49
+ if self.start_time and self.end_time:
50
+ return self.end_time - self.start_time
51
+ return 0.0
52
+
53
+ def get_success_rate(self) -> float:
54
+ total = self.success_count + self.error_count
55
+ return (self.success_count / total * 100) if total > 0 else 0.0
99
56
 
100
57
 
101
- class Crawler:
58
+ class ModernCrawler:
102
59
  """
103
- Single crawler runtime instance, managing Spider and engine lifecycle
104
-
105
- Provides functionality:
106
- - Spider lifecycle management (initialization, running, closing)
107
- - Engine component coordination management
108
- - Configuration merging and validation
109
- - Statistics data collection
110
- - Extension management
111
- - Exception handling and cleanup
60
+ 现代化的Crawler实现
61
+
62
+ 特点:
63
+ 1. 清晰的状态管理
64
+ 2. 依赖注入
65
+ 3. 组件化架构
66
+ 4. 完善的错误处理
112
67
  """
113
-
114
- def __init__(
115
- self,
116
- spider_cls: Type[Spider],
117
- settings: SettingManager,
118
- context: Optional[CrawlerContext] = None
119
- ):
120
- self.spider_cls = spider_cls
121
- self.spider: Optional[Spider] = None
122
- self.engine: Optional[Engine] = None
123
- self.stats: Optional[StatsCollector] = None
124
- self.subscriber: Optional[Subscriber] = None
125
- self.extension: Optional[ExtensionManager] = None
126
- self.settings: SettingManager = settings.copy()
127
- self.context = context or CrawlerContext()
128
-
129
- # State management
130
- self._closed = False
131
- self._close_lock = asyncio.Lock()
132
- self._start_time = None
133
- self._end_time = None
134
-
135
- # Performance monitoring
136
- self._performance_metrics = {
137
- 'initialization_time': 0,
138
- 'crawl_duration': 0,
139
- 'memory_peak': 0,
140
- 'request_count': 0,
141
- 'error_count': 0
142
- }
143
-
144
- # Initialize components
145
- self.subscriber = self._create_subscriber()
146
- self.spider = self._create_spider()
147
- self.engine = self._create_engine()
148
- self.stats = self._create_stats()
149
- # Note: Do not initialize extension manager here, let it initialize in the engine
150
-
151
- # Validate crawler state
152
- self._validate_crawler_state()
153
-
154
- # 打印启动信息,确保在日志系统配置之后打印
155
- self._log_startup_info()
68
+
69
+ def __init__(self, spider_cls: Type, settings=None):
70
+ self._spider_cls = spider_cls
71
+ self._settings = settings
72
+ self._state = CrawlerState.CREATED
73
+ self._state_lock = asyncio.Lock()
156
74
 
157
- # 将启动爬虫名称的日志移到这里,确保在日志系统配置之后打印
158
- logger.info(f"Starting running {self.spider.name}")
159
-
75
+ # 组件
76
+ self._spider = None
77
+ self._engine = None
78
+ self._stats = None
79
+ self._subscriber = None
80
+ self._extension = None
81
+
82
+ # 指标
83
+ self._metrics = CrawlerMetrics()
84
+
85
+ # 日志
86
+ self._logger = get_logger(f'crawler.{spider_cls.__name__ if spider_cls else "unknown"}')
87
+
88
+ # 确保框架已初始化
89
+ self._ensure_framework_ready()
90
+
91
+ def _ensure_framework_ready(self):
92
+ """确保框架已准备就绪"""
93
+ if not is_framework_ready():
94
+ try:
95
+ self._settings = initialize_framework(self._settings)
96
+ self._logger.debug("Framework initialized successfully")
97
+ except Exception as e:
98
+ self._logger.warning(f"Framework initialization failed: {e}")
99
+ # 使用降级策略
100
+ if not self._settings:
101
+ from crawlo.settings.setting_manager import SettingManager
102
+ self._settings = SettingManager()
103
+
104
+ # 确保是SettingManager实例
105
+ if isinstance(self._settings, dict):
106
+ from crawlo.settings.setting_manager import SettingManager
107
+ settings_manager = SettingManager()
108
+ settings_manager.update_attributes(self._settings)
109
+ self._settings = settings_manager
110
+
111
+ @property
112
+ def state(self) -> CrawlerState:
113
+ """获取当前状态"""
114
+ return self._state
115
+
116
+ @property
117
+ def spider(self):
118
+ """获取Spider实例"""
119
+ return self._spider
120
+
121
+ @property
122
+ def stats(self):
123
+ """获取Stats实例(向后兼容)"""
124
+ return self._stats
125
+
126
+ @property
127
+ def metrics(self) -> CrawlerMetrics:
128
+ """获取性能指标"""
129
+ return self._metrics
130
+
131
+ @property
132
+ def settings(self):
133
+ """获取配置"""
134
+ return self._settings
135
+
136
+ @property
137
+ def engine(self):
138
+ """获取Engine实例(向后兼容)"""
139
+ return self._engine
140
+
141
+ @property
142
+ def subscriber(self):
143
+ """获取Subscriber实例(向后兼容)"""
144
+ return self._subscriber
145
+
146
+ @property
147
+ def extension(self):
148
+ """获取Extension实例(向后兼容)"""
149
+ return self._extension
150
+
151
+ @extension.setter
152
+ def extension(self, value):
153
+ """设置Extension实例(向后兼容)"""
154
+ self._extension = value
155
+
156
+ def _create_extension(self):
157
+ """创建Extension管理器(向后兼容)"""
158
+ if self._extension is None:
159
+ try:
160
+ registry = get_component_registry()
161
+ self._extension = registry.create('extension_manager', crawler=self)
162
+ except Exception as e:
163
+ self._logger.warning(f"Failed to create extension manager: {e}")
164
+ return self._extension
165
+
166
+ async def close(self):
167
+ """关闭爹虫(向后兼容)"""
168
+ await self._cleanup()
169
+
160
170
  async def crawl(self):
161
- """
162
- Start the crawler core process
163
-
164
- Includes the following stages:
165
- 1. Initialization stage: Create all components
166
- 2. Validation stage: Check configuration and state
167
- 3. Running stage: Start the crawler engine
168
- 4. Cleanup stage: Resource release
169
- """
170
- init_start = time.time()
171
- self._start_time = init_start
172
-
171
+ """执行爬取任务"""
172
+ async with self._lifecycle_manager():
173
+ await self._initialize_components()
174
+ await self._run_crawler()
175
+
176
+ @asynccontextmanager
177
+ async def _lifecycle_manager(self):
178
+ """生命周期管理"""
179
+ self._metrics.start_time = time.time()
180
+
173
181
  try:
174
- # Update context status
175
- self.context.increment_active()
176
-
177
- # Phase 1: Initialize components
178
- # Adjust component initialization order to ensure log output order meets requirements
179
- self.subscriber = self._create_subscriber()
180
- self.spider = self._create_spider()
181
- self.engine = self._create_engine()
182
- self.stats = self._create_stats()
183
- # Note: Do not initialize extension manager here, let it initialize in the engine
184
-
185
- # Record initialization time
186
- self._performance_metrics['initialization_time'] = time.time() - init_start
187
-
188
- # Phase 2: Validate state
189
- self._validate_crawler_state()
190
-
191
- # Phase 3: Display runtime configuration summary
192
- self._log_runtime_summary()
193
-
194
- # Phase 4: Start crawler
195
- crawl_start = time.time()
196
- await self.engine.start_spider(self.spider)
197
-
198
- # Record crawl time
199
- self._performance_metrics['crawl_duration'] = time.time() - crawl_start
200
- self._end_time = time.time()
201
-
202
- # Update context status
203
- self.context.increment_completed()
204
-
205
- logger.info(f"Spider {self.spider.name} completed, took {self._get_total_duration():.2f} seconds")
206
-
182
+ yield
207
183
  except Exception as e:
208
- self._performance_metrics['error_count'] += 1
209
- self.context.increment_failed(str(e))
210
- logger.error(f"Spider {getattr(self.spider, 'name', 'Unknown')} failed to run: {e}", exc_info=True)
184
+ await self._handle_error(e)
211
185
  raise
212
186
  finally:
213
- self.context.decrement_active()
214
- # Ensure resource cleanup
215
- await self._ensure_cleanup()
216
-
217
- def _log_runtime_summary(self):
218
- """Log runtime configuration summary"""
219
- # Get spider name
220
- spider_name = getattr(self.spider, 'name', 'Unknown')
221
-
222
- # Ensure spider name is a string and strip leading/trailing whitespace
223
- if spider_name:
224
- spider_name = str(spider_name).strip()
225
- else:
226
- spider_name = 'Unknown'
227
-
228
- logger.info(f"Starting running {spider_name}")
229
-
230
- def _validate_crawler_state(self):
231
- """
232
- Validate crawler state and configuration
233
- Ensure all necessary components are properly initialized
234
- """
235
- if not self.spider:
236
- raise RuntimeError("Spider instance not initialized")
237
- if not self.engine:
238
- raise RuntimeError("Engine not initialized")
239
- if not self.stats:
240
- raise RuntimeError("Stats collector not initialized")
241
- if not self.subscriber:
242
- raise RuntimeError("Event subscriber not initialized")
243
-
244
- # Check key configuration
245
- if not self.spider.name:
246
- raise ValueError("Spider name cannot be empty")
247
-
248
- logger.debug(f"Spider {self.spider.name} state validation passed")
249
-
250
- def _get_total_duration(self) -> float:
251
- """Get total runtime"""
252
- if self._start_time and self._end_time:
253
- return self._end_time - self._start_time
254
- return 0.0
255
-
256
- def _log_startup_info(self):
257
- """Print startup information, including run mode and key configuration checks"""
258
- # Get run mode
259
- run_mode = self.settings.get('RUN_MODE', 'standalone')
260
-
261
- # Get version number
262
- version = self.settings.get('VERSION', '1.0.0')
263
- if not version or version == 'None':
264
- version = '1.0.0'
265
-
266
- # Print framework start info
267
- logger.info(f"Crawlo Framework Started {version}")
268
-
269
- # Add mode info if available
270
- mode_info = self.settings.get('_mode_info')
271
- if mode_info:
272
- logger.info(mode_info)
273
- else:
274
- # 如果没有_mode_info,添加默认信息
275
- logger.info("使用单机模式 - 简单快速,适合开发和中小规模爬取")
276
-
277
- # Get actual queue type
278
- queue_type = self.settings.get('QUEUE_TYPE', 'memory')
279
-
280
- # Display information based on run mode and queue type combination
281
- if run_mode == 'distributed':
282
- logger.info("Run Mode: distributed")
283
- logger.info("Distributed Mode - Multi-node collaboration supported")
284
- # Show Redis configuration
285
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
286
- redis_port = self.settings.get('REDIS_PORT', 6379)
287
- logger.info(f"Redis Address: {redis_host}:{redis_port}")
288
- elif run_mode == 'standalone':
289
- if queue_type == 'redis':
290
- logger.info("Run Mode: standalone+redis")
291
- # Show Redis configuration
292
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
293
- redis_port = self.settings.get('REDIS_PORT', 6379)
294
- logger.info(f"Redis Address: {redis_host}:{redis_port}")
295
- elif queue_type == 'auto':
296
- logger.info("Run Mode: standalone+auto")
297
- else: # memory
298
- logger.info("Run Mode: standalone")
299
- else:
300
- logger.info(f"Run Mode: {run_mode}")
301
-
302
- async def _ensure_cleanup(self):
303
- """Ensure resource cleanup"""
187
+ await self._cleanup()
188
+ self._metrics.end_time = time.time()
189
+
190
+ async def _initialize_components(self):
191
+ """初始化组件"""
192
+ async with self._state_lock:
193
+ if self._state != CrawlerState.CREATED:
194
+ raise RuntimeError(f"Cannot initialize from state {self._state}")
195
+
196
+ self._state = CrawlerState.INITIALIZING
197
+
198
+ init_start = time.time()
199
+
304
200
  try:
305
- if not self._closed:
306
- await self.close()
307
- except Exception as e:
308
- logger.warning(f"Error cleaning up resources: {e}")
309
-
310
- def get_performance_metrics(self) -> Dict[str, Any]:
311
- """Get performance metrics"""
312
- metrics = self._performance_metrics.copy()
313
- metrics['total_duration'] = self._get_total_duration()
314
- if self.stats:
315
- # Add statistics data
316
- stats_data = getattr(self.stats, 'get_stats', lambda: {})()
317
- metrics.update(stats_data)
318
- return metrics
319
-
320
- @staticmethod
321
- def _create_subscriber() -> Subscriber:
322
- """Create event subscriber"""
323
- return Subscriber()
324
-
325
- def _create_spider(self) -> Spider:
326
- """
327
- Create and validate spider instance (enhanced version)
328
-
329
- Performs the following validations:
330
- - Spider name must exist
331
- - start_requests method must be callable
332
- - start_urls cannot be a string
333
- - parse method is recommended to exist
334
- """
335
- spider = self.spider_cls.create_instance(self)
336
-
337
- # Required attribute check
338
- if not getattr(spider, 'name', None):
339
- raise AttributeError(
340
- f"Spider class '{self.spider_cls.__name__}' must define 'name' attribute.\n"
341
- f"Example: name = 'my_spider'"
342
- )
343
-
344
- if not callable(getattr(spider, 'start_requests', None)):
345
- raise AttributeError(
346
- f"Spider '{spider.name}' must implement a callable 'start_requests' method.\n"
347
- f"Example: def start_requests(self): yield Request(url='...')"
348
- )
349
-
350
- # start_urls type check
351
- start_urls = getattr(spider, 'start_urls', [])
352
- if isinstance(start_urls, str):
353
- raise TypeError(
354
- f"Spider '{spider.name}' 'start_urls' must be a list or tuple, not a string.\n"
355
- f"Correct: start_urls = ['http://example.com']\n"
356
- f"Incorrect: start_urls = 'http://example.com'"
357
- )
358
-
359
- # parse method check (warning instead of error)
360
- if not callable(getattr(spider, 'parse', None)):
361
- logger.warning(
362
- f"Spider '{spider.name}' does not define 'parse' method.\n"
363
- f"Ensure all Requests specify a callback function, otherwise responses will be ignored."
364
- )
365
-
366
- # Set spider configuration
367
- self._set_spider(spider)
368
-
369
- logger.debug(f"Spider '{spider.name}' initialized successfully")
370
- return spider
371
-
372
- def _create_engine(self) -> Engine:
373
- """Create and initialize engine"""
374
- engine = Engine(self)
375
- engine.engine_start()
376
- logger.debug(f"Engine initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
377
- return engine
378
-
379
- def _create_stats(self) -> StatsCollector:
380
- """Create stats collector"""
381
- stats = StatsCollector(self)
382
- logger.debug(
383
- f"Stats collector initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
384
- return stats
385
-
386
- def _create_extension(self) -> ExtensionManager:
387
- """Create extension manager"""
388
- # Modify extension manager creation method, delay initialization until needed
389
- extension = ExtensionManager.create_instance(self)
390
- logger.debug(
391
- f"Extension manager initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
392
- return extension
393
-
394
- def _set_spider(self, spider: Spider):
395
- """
396
- Set spider configuration and event subscription
397
- Bind spider lifecycle events with subscriber
398
- """
399
- # Subscribe to spider lifecycle events
400
- self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
401
- self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
402
-
403
- # Merge spider custom configuration
404
- merge_settings(spider, self.settings)
405
-
406
- logger.debug(f"Spider '{spider.name}' configuration merged successfully")
407
-
408
- async def close(self, reason='finished') -> None:
409
- """
410
- Close crawler and clean up resources (enhanced version)
411
-
412
- Ensure closing only once and handle all cleanup operations
413
- """
414
- async with self._close_lock:
415
- if self._closed:
416
- return
417
-
418
- self._closed = True
419
- self._end_time = time.time()
420
-
201
+ # 使用组件工厂创建组件
202
+ registry = get_component_registry()
203
+
204
+ # 创建Subscriber(无依赖)
205
+ self._subscriber = registry.create('subscriber')
206
+
207
+ # 创建Spider
208
+ self._spider = self._create_spider()
209
+
210
+ # 创建Engine(需要crawler参数)
211
+ self._engine = registry.create('engine', crawler=self)
212
+
213
+ # 创建Stats(需要crawler参数)
214
+ self._stats = registry.create('stats', crawler=self)
215
+
216
+ # 创建Extension Manager (可选,需要crawler参数)
421
217
  try:
422
- # Notify spider close event
423
- if self.subscriber:
424
- await self.subscriber.notify(spider_closed)
425
-
426
- # Statistics data collection
427
- if self.stats and self.spider:
428
- self.stats.close_spider(spider=self.spider, reason=reason)
429
- # Record statistics data
430
- try:
431
- from crawlo.commands.stats import record_stats
432
- record_stats(self)
433
- except ImportError:
434
- logger.debug("Statistics recording module does not exist, skipping statistics recording")
435
-
436
- logger.info(
437
- f"Spider '{getattr(self.spider, 'name', 'Unknown')}' closed, "
438
- f"reason: {reason}, took: {self._get_total_duration():.2f} seconds"
439
- )
440
-
218
+ self._extension = registry.create('extension_manager', crawler=self)
441
219
  except Exception as e:
442
- logger.error(f"Error closing crawler: {e}", exc_info=True)
443
- finally:
444
- # Ensure resource cleanup
445
- await self._cleanup_resources()
446
-
447
- async def _cleanup_resources(self):
448
- """Clean up all resources"""
449
- cleanup_tasks = []
450
-
451
- # Engine cleanup
452
- if self.engine:
453
- try:
454
- cleanup_tasks.append(self.engine.close())
455
- except AttributeError:
456
- pass # Engine has no close method
457
-
458
- # Extension cleanup
459
- if self.extension:
460
- try:
461
- cleanup_tasks.append(self.extension.close())
462
- except AttributeError:
463
- pass
464
-
465
- # Stats collector cleanup
466
- if self.stats:
467
- try:
468
- cleanup_tasks.append(self.stats.close())
469
- except AttributeError:
470
- pass
471
-
472
- # Concurrently execute cleanup tasks
473
- if cleanup_tasks:
474
- await asyncio.gather(*cleanup_tasks, return_exceptions=True)
475
-
476
- logger.debug("Resource cleanup completed")
220
+ self._logger.warning(f"Failed to create extension manager: {e}")
221
+
222
+ self._metrics.initialization_duration = time.time() - init_start
223
+
224
+ async with self._state_lock:
225
+ self._state = CrawlerState.READY
226
+
227
+ self._logger.debug(f"Crawler components initialized successfully in {self._metrics.initialization_duration:.2f}s")
228
+
229
+ except Exception as e:
230
+ async with self._state_lock:
231
+ self._state = CrawlerState.ERROR
232
+ raise RuntimeError(f"Component initialization failed: {e}")
233
+
234
+ def _create_spider(self):
235
+ """创建Spider实例"""
236
+ if not self._spider_cls:
237
+ raise ValueError("Spider class not provided")
238
+
239
+ # 检查Spider类的有效性
240
+ if not hasattr(self._spider_cls, 'name'):
241
+ raise ValueError("Spider class must have 'name' attribute")
242
+
243
+ # 创建Spider实例
244
+ spider = self._spider_cls()
245
+
246
+ # 设置crawler引用
247
+ if hasattr(spider, 'crawler'):
248
+ spider.crawler = self
249
+
250
+ return spider
251
+
252
+ async def _run_crawler(self):
253
+ """运行爬虫引擎"""
254
+ async with self._state_lock:
255
+ if self._state != CrawlerState.READY:
256
+ raise RuntimeError(f"Cannot run from state {self._state}")
257
+
258
+ self._state = CrawlerState.RUNNING
259
+
260
+ crawl_start = time.time()
261
+
262
+ try:
263
+ # 启动引擎
264
+ if self._engine:
265
+ await self._engine.start_spider(self._spider)
266
+ else:
267
+ raise RuntimeError("Engine not initialized")
268
+
269
+ self._metrics.crawl_duration = time.time() - crawl_start
270
+
271
+ self._logger.info(f"Crawler completed successfully in {self._metrics.crawl_duration:.2f}s")
272
+
273
+ except Exception as e:
274
+ self._metrics.crawl_duration = time.time() - crawl_start
275
+ raise RuntimeError(f"Crawler execution failed: {e}")
276
+
277
+ async def _handle_error(self, error: Exception):
278
+ """处理错误"""
279
+ async with self._state_lock:
280
+ self._state = CrawlerState.ERROR
281
+
282
+ self._metrics.error_count += 1
283
+ self._logger.error(f"Crawler error: {error}", exc_info=True)
284
+
285
+ # 这里可以添加错误恢复逻辑
286
+
287
+ async def _cleanup(self):
288
+ """清理资源"""
289
+ async with self._state_lock:
290
+ if self._state not in [CrawlerState.CLOSING, CrawlerState.CLOSED]:
291
+ self._state = CrawlerState.CLOSING
292
+
293
+ try:
294
+ # 关闭各个组件
295
+ if self._engine and hasattr(self._engine, 'close'):
296
+ try:
297
+ await self._engine.close()
298
+ except Exception as e:
299
+ self._logger.warning(f"Engine cleanup failed: {e}")
300
+
301
+ # 调用Spider的spider_closed方法
302
+ if self._spider:
303
+ try:
304
+ if asyncio.iscoroutinefunction(self._spider.spider_closed):
305
+ await self._spider.spider_closed()
306
+ else:
307
+ self._spider.spider_closed()
308
+ except Exception as e:
309
+ self._logger.warning(f"Spider cleanup failed: {e}")
310
+
311
+ if self._stats and hasattr(self._stats, 'close'):
312
+ try:
313
+ close_result = self._stats.close()
314
+ if asyncio.iscoroutine(close_result):
315
+ await close_result
316
+ except Exception as e:
317
+ self._logger.warning(f"Stats cleanup failed: {e}")
318
+
319
+ async with self._state_lock:
320
+ self._state = CrawlerState.CLOSED
321
+
322
+ self._logger.debug("Crawler cleanup completed")
323
+
324
+ except Exception as e:
325
+ self._logger.error(f"Cleanup error: {e}")
477
326
 
478
327
 
479
328
  class CrawlerProcess:
480
329
  """
481
- Crawler process manager
482
-
483
- Supported features:
484
- - Multi-crawler concurrent scheduling and resource management
485
- - Automatic module discovery and spider registration
486
- - Intelligent concurrency control and load balancing
487
- - Graceful shutdown and signal handling
488
- - Real-time status monitoring and statistics
489
- - Error recovery and retry mechanism
490
- - Large-scale crawler optimization support
491
-
492
- Usage example:
493
- # Basic usage
494
- process = CrawlerProcess()
495
- await process.crawl(MySpider)
496
-
497
- # Multi-crawler concurrency
498
- await process.crawl([Spider1, Spider2, 'spider_name'])
499
-
500
- # Custom concurrency
501
- process = CrawlerProcess(max_concurrency=8)
330
+ Crawler进程管理器 - 管理多个Crawler的执行
331
+
332
+ 简化版本,专注于核心功能
502
333
  """
503
-
504
- def __init__(
505
- self,
506
- settings: Optional[SettingManager] = None,
507
- max_concurrency: Optional[int] = None,
508
- spider_modules: Optional[List[str]] = None,
509
- enable_monitoring: bool = True
510
- ):
511
- # Basic configuration
512
- self.settings: SettingManager = settings or self._get_default_settings()
513
- self.crawlers: Set[Crawler] = set()
514
- self._active_tasks: Set[asyncio.Task] = set()
515
-
516
- # Context manager
517
- self.context = CrawlerContext()
518
-
519
- # Concurrency control configuration
520
- self.max_concurrency: int = (
521
- max_concurrency
522
- or self.settings.get('MAX_RUNNING_SPIDERS')
523
- or self.settings.get('CONCURRENCY', 3)
524
- )
525
- self.semaphore = asyncio.Semaphore(self.max_concurrency)
526
-
527
- # Monitoring configuration
528
- self.enable_monitoring = enable_monitoring
529
- self._monitoring_task = None
530
- self._shutdown_event = asyncio.Event()
531
-
532
- # Automatically discover and import spider modules
334
+
335
+ def __init__(self, settings=None, max_concurrency: int = 3, spider_modules=None):
336
+ self._settings = settings or initialize_framework()
337
+ self._max_concurrency = max_concurrency
338
+ self._crawlers: List[ModernCrawler] = []
339
+ self._semaphore = asyncio.Semaphore(max_concurrency)
340
+ self._logger = get_logger('crawler.process')
341
+ self._spider_modules = spider_modules # 保存spider_modules
342
+
343
+ # 如果提供了spider_modules,自动注册这些模块中的爬虫
533
344
  if spider_modules:
534
- self.auto_discover(spider_modules)
535
-
536
- # Use snapshot of global registry (avoid subsequent import impact)
537
- self._spider_registry: Dict[str, Type[Spider]] = get_global_spider_registry()
538
-
539
- # Performance monitoring
540
- self._performance_stats = {
541
- 'total_requests': 0,
542
- 'successful_requests': 0,
543
- 'failed_requests': 0,
544
- 'memory_usage_mb': 0,
545
- 'cpu_usage_percent': 0
546
- }
547
-
548
- # Register signal handlers
549
- signal.signal(signal.SIGINT, self._shutdown)
550
- signal.signal(signal.SIGTERM, self._shutdown)
551
-
552
- # 注意:移除在这里调用_log_startup_info(),因为这时候日志系统可能还没有被正确配置
553
- # 日志系统的配置是在project.py的get_settings函数中进行的,而CrawlerProcess的实例化
554
- # 是在get_settings函数返回之前进行的,所以这时候调用_log_startup_info()可能会导致
555
- # 日志信息没有被正确写入到日志文件中
556
-
557
- logger.debug(
558
- f"CrawlerProcess initialized successfully\n"
559
- f" - Max concurrent crawlers: {self.max_concurrency}\n"
560
- f" - Registered crawlers: {len(self._spider_registry)}\n"
561
- f" - Monitoring enabled: {self.enable_monitoring}"
562
- )
563
-
564
- async def start_monitoring(self):
565
- """Start monitoring task"""
566
- if not self.enable_monitoring:
567
- return
568
-
569
- self._monitoring_task = asyncio.create_task(self._monitor_loop())
570
- logger.debug("Monitoring task started")
571
-
572
- async def stop_monitoring(self):
573
- """Stop monitoring task"""
574
- if self._monitoring_task and not self._monitoring_task.done():
575
- self._monitoring_task.cancel()
576
- try:
577
- await self._monitoring_task
578
- except asyncio.CancelledError:
579
- pass
580
- logger.debug("Monitoring task stopped")
581
-
582
- async def _monitor_loop(self):
583
- """Monitoring loop, periodically collect and report status"""
584
- try:
585
- while not self._shutdown_event.is_set():
586
- await self._collect_performance_stats()
587
-
588
- # Output status every 30 seconds
589
- stats = self.context.get_stats()
590
- if stats['active_crawlers'] > 0:
591
- logger.debug(
592
- f"Crawler status: Active {stats['active_crawlers']}, "
593
- f"Completed {stats['completed_crawlers']}, "
594
- f"Failed {stats['failed_crawlers']}, "
595
- f"Success rate {stats['success_rate']:.1f}%"
596
- )
597
-
598
- await asyncio.sleep(30) # 30 second interval
599
-
600
- except asyncio.CancelledError:
601
- logger.debug("Monitoring loop cancelled")
602
- except Exception as e:
603
- logger.error(f"Monitoring loop error: {e}", exc_info=True)
604
-
605
- async def _collect_performance_stats(self):
606
- """Collect performance statistics data"""
345
+ self._register_spider_modules(spider_modules)
346
+
347
+ # 指标
348
+ self._start_time: Optional[float] = None
349
+ self._end_time: Optional[float] = None
350
+
351
+ def _register_spider_modules(self, spider_modules):
352
+ """注册爬虫模块"""
607
353
  try:
608
- import psutil
609
- import os
610
-
611
- process = psutil.Process(os.getpid())
612
- memory_info = process.memory_info()
613
-
614
- self._performance_stats.update({
615
- 'memory_usage_mb': round(memory_info.rss / 1024 / 1024, 2),
616
- 'cpu_usage_percent': round(process.cpu_percent(), 2)
617
- })
618
-
619
- except ImportError:
620
- # Skip performance monitoring when psutil is not available
621
- pass
354
+ from crawlo.spider import get_global_spider_registry
355
+ registry = get_global_spider_registry()
356
+
357
+ self._logger.debug(f"Registering spider modules: {spider_modules}")
358
+
359
+ initial_spider_count = len(registry)
360
+
361
+ for module_path in spider_modules:
362
+ try:
363
+ # 导入模块
364
+ __import__(module_path)
365
+ self._logger.debug(f"Successfully imported spider module: {module_path}")
366
+ except ImportError as e:
367
+ self._logger.warning(f"Failed to import spider module {module_path}: {e}")
368
+ # 如果导入失败,尝试自动发现
369
+ self._auto_discover_spider_modules([module_path])
370
+
371
+ # 检查注册表中的爬虫
372
+ spider_names = list(registry.keys())
373
+ self._logger.debug(f"Registered spiders after import: {spider_names}")
374
+
375
+ # 如果导入模块后没有新的爬虫被注册,则尝试自动发现
376
+ final_spider_count = len(registry)
377
+ if final_spider_count == initial_spider_count:
378
+ self._logger.debug("No new spiders registered after importing modules, attempting auto-discovery")
379
+ self._auto_discover_spider_modules(spider_modules)
380
+ spider_names = list(registry.keys())
381
+ self._logger.debug(f"Registered spiders after auto-discovery: {spider_names}")
622
382
  except Exception as e:
623
- logger.debug(f"Failed to collect performance statistics: {e}")
624
-
625
- @staticmethod
626
- def auto_discover(modules: List[str]):
383
+ self._logger.warning(f"Error registering spider modules: {e}")
384
+
385
+ def _auto_discover_spider_modules(self, spider_modules):
627
386
  """
628
- Automatically import modules, trigger Spider class definition and registration (enhanced version)
629
-
630
- Supports recursive scanning and error recovery
387
+ 自动发现并导入爬虫模块中的所有爬虫
388
+ 这个方法会扫描指定模块目录下的所有Python文件并自动导入
631
389
  """
632
- import importlib
633
- import pkgutil
634
-
635
- discovered_count = 0
636
- error_count = 0
637
-
638
- for module_name in modules:
639
- try:
640
- module = importlib.import_module(module_name)
641
-
642
- if hasattr(module, '__path__'):
643
- # Package module, recursive scanning
644
- for _, name, _ in pkgutil.walk_packages(module.__path__, module.__name__ + "."):
645
- try:
646
- importlib.import_module(name)
647
- discovered_count += 1
648
- except Exception as sub_e:
649
- error_count += 1
650
- logger.warning(f"Failed to import submodule {name}: {sub_e}")
651
- else:
652
- # Single module
653
- importlib.import_module(module_name)
654
- discovered_count += 1
655
-
656
- logger.debug(f"Module scanned: {module_name}")
657
-
658
- except Exception as e:
659
- error_count += 1
660
- logger.error(f"Failed to scan module {module_name}: {e}", exc_info=True)
661
-
662
- logger.debug(
663
- f"Spider registration completed: {discovered_count} succeeded, {error_count} failed"
664
- )
665
-
666
- # === Public read-only interface: Avoid direct access to _spider_registry ===
667
-
668
- def get_spider_names(self) -> List[str]:
669
- """Get all registered spider names"""
670
- return list(self._spider_registry.keys())
671
-
672
- def get_spider_class(self, name: str) -> Optional[Type[Spider]]:
673
- """Get spider class by name"""
674
- return self._spider_registry.get(name)
675
-
390
+ try:
391
+ from crawlo.spider import get_global_spider_registry
392
+ import importlib
393
+ from pathlib import Path
394
+ import sys
395
+
396
+ registry = get_global_spider_registry()
397
+ initial_spider_count = len(registry)
398
+
399
+ for module_path in spider_modules:
400
+ try:
401
+ # 将模块路径转换为文件系统路径
402
+ # 例如: ofweek_standalone.spiders -> ofweek_standalone/spiders
403
+ package_parts = module_path.split('.')
404
+ if len(package_parts) < 2:
405
+ continue
406
+
407
+ # 获取项目根目录
408
+ project_root = None
409
+ for path in sys.path:
410
+ if path and Path(path).exists():
411
+ possible_module_path = Path(path) / package_parts[0]
412
+ if possible_module_path.exists():
413
+ project_root = path
414
+ break
415
+
416
+ if not project_root:
417
+ # 尝试使用当前工作目录
418
+ project_root = str(Path.cwd())
419
+
420
+ # 构建模块目录路径
421
+ module_dir = Path(project_root)
422
+ for part in package_parts:
423
+ module_dir = module_dir / part
424
+
425
+ # 如果目录存在,扫描其中的Python文件
426
+ if module_dir.exists() and module_dir.is_dir():
427
+ # 导入目录下的所有Python文件(除了__init__.py)
428
+ for py_file in module_dir.glob("*.py"):
429
+ if py_file.name.startswith('_'):
430
+ continue
431
+
432
+ # 构造模块名
433
+ module_name = py_file.stem # 文件名(不含扩展名)
434
+ full_module_path = f"{module_path}.{module_name}"
435
+
436
+ try:
437
+ # 导入模块以触发Spider注册
438
+ importlib.import_module(full_module_path)
439
+ except ImportError as e:
440
+ self._logger.warning(f"Failed to auto-import spider module {full_module_path}: {e}")
441
+ except Exception as e:
442
+ self._logger.warning(f"Error during auto-discovery for module {module_path}: {e}")
443
+
444
+ # 检查是否有新的爬虫被注册
445
+ final_spider_count = len(registry)
446
+ if final_spider_count > initial_spider_count:
447
+ new_spiders = list(registry.keys())
448
+ self._logger.info(f"Auto-discovered {final_spider_count - initial_spider_count} new spiders: {new_spiders}")
449
+
450
+ except Exception as e:
451
+ self._logger.warning(f"Error during auto-discovery of spider modules: {e}")
452
+
676
453
  def is_spider_registered(self, name: str) -> bool:
677
- """Check if a name is registered"""
678
- return name in self._spider_registry
679
-
680
- async def crawl(self, spiders: Union[Type[Spider], str, List[Union[Type[Spider], str]]]):
681
- """
682
- Start one or more crawlers
683
-
684
- Enhanced features:
685
- - Intelligent concurrency control
686
- - Real-time monitoring and statistics
687
- - Error recovery and retry
688
- - Graceful shutdown handling
689
- """
690
- # Phase 1: Preprocessing and validation
691
- spider_classes_to_run = self._resolve_spiders_to_run(spiders)
692
- total = len(spider_classes_to_run)
693
-
694
- if total == 0:
695
- raise ValueError("At least one spider class or name must be provided")
696
-
697
- # Phase 2: Initialize context and monitoring
698
- for _ in range(total):
699
- self.context.increment_total()
700
-
701
- # Start monitoring task
702
- await self.start_monitoring()
703
-
454
+ """检查爬虫是否已注册"""
455
+ from crawlo.spider import get_global_spider_registry
456
+ registry = get_global_spider_registry()
457
+ return name in registry
458
+
459
+ def get_spider_class(self, name: str):
460
+ """获取爬虫类"""
461
+ from crawlo.spider import get_global_spider_registry
462
+ registry = get_global_spider_registry()
463
+ return registry.get(name)
464
+
465
+ def get_spider_names(self):
466
+ """获取所有注册的爬虫名称"""
467
+ from crawlo.spider import get_global_spider_registry
468
+ registry = get_global_spider_registry()
469
+ return list(registry.keys())
470
+
471
+ async def crawl(self, spider_cls_or_name, settings=None):
472
+ """运行单个爬虫"""
473
+ spider_cls = self._resolve_spider_class(spider_cls_or_name)
474
+
475
+ # 记录启动的爬虫名称(符合规范要求)
476
+ from crawlo.logging import get_logger
477
+ logger = get_logger('crawlo.framework')
478
+ logger.info(f"Starting spider: {spider_cls.name}")
479
+
480
+ merged_settings = self._merge_settings(settings)
481
+ crawler = ModernCrawler(spider_cls, merged_settings)
482
+
483
+ async with self._semaphore:
484
+ await crawler.crawl()
485
+
486
+ return crawler
487
+
488
+ async def crawl_multiple(self, spider_classes_or_names, settings=None):
489
+ """运行多个爬虫"""
490
+ self._start_time = time.time()
491
+
704
492
  try:
705
- # Phase 3: Initialize context and monitoring
706
- spider_classes_to_run.sort(key=lambda cls: cls.__name__.lower())
707
-
708
- logger.debug(
709
- f"Starting {total} crawlers\n"
710
- f" - Max concurrency: {self.max_concurrency}\n"
711
- f" - Spider list: {[cls.__name__ for cls in spider_classes_to_run]}"
712
- )
713
-
714
- # Phase 4: Stream start all crawler tasks
715
- tasks = [
716
- asyncio.create_task(
717
- self._run_spider_with_limit(spider_cls, index + 1, total),
718
- name=f"spider-{spider_cls.__name__}-{index + 1}"
719
- )
720
- for index, spider_cls in enumerate(spider_classes_to_run)
721
- ]
722
-
723
- # Phase 5: Wait for all tasks to complete (failures do not interrupt)
724
- results = await asyncio.gather(*tasks, return_exceptions=True)
725
-
726
- # Phase 6: Statistics exceptions and results
727
- failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
728
- successful = total - len(failed)
729
-
730
- if failed:
731
- failed_spiders = [spider_classes_to_run[i].__name__ for i in failed]
732
- logger.error(
733
- f"Crawler execution result: {successful}/{total} succeeded, {len(failed)}/{total} failed\n"
734
- f" - Failed crawlers: {failed_spiders}"
735
- )
736
-
737
- # Record detailed error information
738
- for i in failed:
739
- error = results[i]
740
- logger.error(f"Spider {spider_classes_to_run[i].__name__} error details: {error}")
493
+ spider_classes = []
494
+ for cls_or_name in spider_classes_or_names:
495
+ spider_cls = self._resolve_spider_class(cls_or_name)
496
+ spider_classes.append(spider_cls)
497
+
498
+ # 记录启动的爬虫名称(符合规范要求)
499
+ spider_names = [cls.name for cls in spider_classes]
500
+ from crawlo.logging import get_logger
501
+ logger = get_logger('crawlo.framework')
502
+ if len(spider_names) == 1:
503
+ logger.info(f"Starting spider: {spider_names[0]}")
741
504
  else:
742
- logger.info(f"All {total} crawlers completed successfully!")
743
-
744
- # Return statistics results
745
- return {
746
- 'total': total,
747
- 'successful': successful,
748
- 'failed': len(failed),
749
- 'success_rate': (successful / total) * 100 if total > 0 else 0,
750
- 'context_stats': self.context.get_stats()
751
- }
752
-
505
+ logger.info(f"Starting spiders: {', '.join(spider_names)}")
506
+
507
+ tasks = []
508
+ for spider_cls in spider_classes:
509
+ merged_settings = self._merge_settings(settings)
510
+ crawler = ModernCrawler(spider_cls, merged_settings)
511
+ self._crawlers.append(crawler)
512
+
513
+ task = asyncio.create_task(self._run_with_semaphore(crawler))
514
+ tasks.append(task)
515
+
516
+ results = await asyncio.gather(*tasks, return_exceptions=True)
517
+
518
+ # 处理结果
519
+ successful = sum(1 for r in results if not isinstance(r, Exception))
520
+ failed = len(results) - successful
521
+
522
+ self._logger.info(f"Crawl completed: {successful} successful, {failed} failed")
523
+
524
+ return results
525
+
753
526
  finally:
754
- # Phase 7: Cleanup and shutdown
755
- await self.stop_monitoring()
756
- await self._cleanup_process()
757
-
758
- async def _cleanup_process(self):
759
- """Clean up process resources"""
760
- try:
761
- # Wait for all active crawlers to complete
762
- if self.crawlers:
763
- close_tasks = [crawler.close() for crawler in self.crawlers]
764
- await asyncio.gather(*close_tasks, return_exceptions=True)
765
- self.crawlers.clear()
766
-
767
- # Clean up active tasks
768
- if self._active_tasks:
769
- for task in list(self._active_tasks):
770
- if not task.done():
771
- task.cancel()
772
- await asyncio.gather(*self._active_tasks, return_exceptions=True)
773
- self._active_tasks.clear()
774
-
775
- logger.debug("Process resources cleanup completed")
776
-
777
- except Exception as e:
778
- logger.error(f"Error cleaning up process resources: {e}", exc_info=True)
779
-
780
- def get_process_stats(self) -> Dict[str, Any]:
781
- """Get process statistics information"""
782
- context_stats = self.context.get_stats()
783
-
784
- return {
785
- 'context': context_stats,
786
- 'performance': self._performance_stats.copy(),
787
- 'crawlers': {
788
- 'total_registered': len(self._spider_registry),
789
- 'active_crawlers': len(self.crawlers),
790
- 'max_concurrency': self.max_concurrency
791
- },
792
- 'registry': {
793
- 'spider_names': list(self._spider_registry.keys()),
794
- 'spider_classes': [cls.__name__ for cls in self._spider_registry.values()]
795
- }
796
- }
797
-
798
- def _resolve_spiders_to_run(
799
- self,
800
- spiders_input: Union[Type[Spider], str, List[Union[Type[Spider], str]]]
801
- ) -> List[Type[Spider]]:
802
- """
803
- Resolve input to spider class list
804
-
805
- Supports various input formats and validates uniqueness
806
- """
807
- inputs = self._normalize_inputs(spiders_input)
808
- seen_spider_names: Set[str] = set()
809
- spider_classes: List[Type[Spider]] = []
810
-
811
- for item in inputs:
812
- try:
813
- spider_cls = self._resolve_spider_class(item)
814
- spider_name = getattr(spider_cls, 'name', None)
815
-
816
- if not spider_name:
817
- raise ValueError(f"Spider class {spider_cls.__name__} missing 'name' attribute")
818
-
819
- if spider_name in seen_spider_names:
820
- raise ValueError(
821
- f"Duplicate spider name '{spider_name}' in this run.\n"
822
- f"Ensure each spider's name attribute is unique in this run."
823
- )
824
-
825
- seen_spider_names.add(spider_name)
826
- spider_classes.append(spider_cls)
827
-
828
- logger.debug(
829
- f"Spider resolved successfully: {item} -> {spider_cls.__name__} (name='{spider_name}')")
830
-
831
- except Exception as e:
832
- logger.error(f"Failed to resolve spider: {item} - {e}")
833
- raise
834
-
835
- return spider_classes
836
-
837
- @staticmethod
838
- def _normalize_inputs(spiders_input) -> List[Union[Type[Spider], str]]:
839
- """
840
- Normalize input to list
841
-
842
- Supports more input types and provides better error information
843
- """
844
- if isinstance(spiders_input, (type, str)):
845
- return [spiders_input]
846
- elif isinstance(spiders_input, (list, tuple, set)):
847
- spider_list = list(spiders_input)
848
- if not spider_list:
849
- raise ValueError("Spider list cannot be empty")
850
- return spider_list
851
- else:
852
- raise TypeError(
853
- f"Unsupported spiders parameter type: {type(spiders_input)}\n"
854
- f"Supported types: Spider class, name string, or their list/tuple/set"
855
- )
856
-
857
- def _resolve_spider_class(self, item: Union[Type[Spider], str]) -> Type[Spider]:
858
- """
859
- Resolve single input item to spider class
860
-
861
- Provides better error prompts and debugging information
862
- """
863
- if isinstance(item, type) and issubclass(item, Spider):
864
- # Direct Spider class
865
- return item
866
- elif isinstance(item, str):
867
- # String name, need to look up registry
868
- spider_cls = self._spider_registry.get(item)
869
- if not spider_cls:
870
- available_spiders = list(self._spider_registry.keys())
871
- raise ValueError(
872
- f"Spider named '{item}' not found.\n"
873
- f"Registered spiders: {available_spiders}\n"
874
- f"Please check if the spider name is correct, or ensure the spider has been properly imported and registered."
875
- )
876
- return spider_cls
877
- else:
878
- raise TypeError(
879
- f"Invalid type {type(item)}: {item}\n"
880
- f"Must be Spider class or string name.\n"
881
- f"Example: MySpider or 'my_spider'"
882
- )
883
-
884
- async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
885
- """
886
- Spider running function limited by semaphore
887
-
888
- Includes enhanced error handling and monitoring functionality
889
- """
890
- task = asyncio.current_task()
891
- crawler = None
892
-
893
- try:
894
- # Register task
895
- if task:
896
- self._active_tasks.add(task)
897
-
898
- # Acquire concurrency permit
899
- await self.semaphore.acquire()
900
-
901
- # start_msg = f"[{seq}/{total}] Initializing spider: {spider_cls.__name__}"
902
- # logger.info(start_msg)
903
-
904
- # Create and run crawler
905
- crawler = Crawler(spider_cls, self.settings, self.context)
906
- self.crawlers.add(crawler)
907
-
908
- # Record start time
909
- start_time = time.time()
910
-
911
- # Run crawler
527
+ self._end_time = time.time()
528
+ if self._start_time:
529
+ duration = self._end_time - self._start_time
530
+ self._logger.info(f"Total execution time: {duration:.2f}s")
531
+
532
+ async def _run_with_semaphore(self, crawler: ModernCrawler):
533
+ """在信号量控制下运行爬虫"""
534
+ async with self._semaphore:
912
535
  await crawler.crawl()
913
-
914
- # Calculate runtime
915
- duration = time.time() - start_time
916
-
917
- end_msg = (
918
- f"[{seq}/{total}] Crawler completed: {spider_cls.__name__}, "
919
- f"took: {duration:.2f} seconds"
920
- )
921
- logger.info(end_msg)
922
-
923
- # Record success statistics
924
- self._performance_stats['successful_requests'] += 1
925
-
926
- except Exception as e:
927
- # Record failure statistics
928
- self._performance_stats['failed_requests'] += 1
929
-
930
- error_msg = f"Spider {spider_cls.__name__} execution failed: {e}"
931
- logger.error(error_msg, exc_info=True)
932
-
933
- # Record error information to context
934
- if hasattr(self, 'context'):
935
- self.context.increment_failed(error_msg)
936
-
937
- raise
938
- finally:
939
- # Clean up resources
536
+ return crawler
537
+
538
+ def _resolve_spider_class(self, spider_cls_or_name):
539
+ """解析Spider类"""
540
+ if isinstance(spider_cls_or_name, str):
541
+ # 从注册表中查找
940
542
  try:
941
- if crawler and crawler in self.crawlers:
942
- self.crawlers.remove(crawler)
943
-
944
- if task and task in self._active_tasks:
945
- self._active_tasks.remove(task)
946
-
947
- self.semaphore.release()
948
-
949
- except Exception as cleanup_error:
950
- logger.warning(f"Error cleaning up resources: {cleanup_error}")
951
-
952
- def _shutdown(self, _signum, _frame):
953
- """
954
- Graceful shutdown signal handling
955
-
956
- Provides better shutdown experience and resource cleanup
957
- """
958
- signal_name = {signal.SIGINT: 'SIGINT', signal.SIGTERM: 'SIGTERM'}.get(_signum, str(_signum))
959
- logger.warning(f"Received shutdown signal {signal_name}, stopping all crawlers...")
960
-
961
- # Set shutdown event
962
- if hasattr(self, '_shutdown_event'):
963
- self._shutdown_event.set()
964
-
965
- # Stop all crawler engines
966
- for crawler in list(self.crawlers):
967
- if crawler.engine:
968
- crawler.engine.running = False
969
- crawler.engine.normal = False
970
- logger.debug(f"Crawler engine stopped: {getattr(crawler.spider, 'name', 'Unknown')}")
971
-
972
- # Create shutdown task
973
- asyncio.create_task(self._wait_for_shutdown())
974
-
975
- logger.info("Shutdown command sent, waiting for crawlers to complete current tasks...")
976
-
977
- async def _wait_for_shutdown(self):
978
- """
979
- Wait for all active tasks to complete
980
-
981
- Provides better shutdown time control and progress feedback
982
- """
983
- try:
984
- # Stop monitoring task
985
- await self.stop_monitoring()
986
-
987
- # Wait for active tasks to complete
988
- pending = [t for t in self._active_tasks if not t.done()]
989
-
990
- if pending:
991
- logger.info(
992
- f"Waiting for {len(pending)} active tasks to complete..."
993
- f"(Maximum wait time: 30 seconds)"
994
- )
995
-
996
- # Set timeout
997
- try:
998
- await asyncio.wait_for(
999
- asyncio.gather(*pending, return_exceptions=True),
1000
- timeout=30.0
1001
- )
1002
- except asyncio.TimeoutError:
1003
- logger.warning("Some tasks timed out, forcing cancellation...")
1004
-
1005
- # Force cancel timed out tasks
1006
- for task in pending:
1007
- if not task.done():
1008
- task.cancel()
1009
-
1010
- # Wait for cancellation to complete
1011
- await asyncio.gather(*pending, return_exceptions=True)
1012
-
1013
- # Final cleanup
1014
- await self._cleanup_process()
1015
-
1016
- # Output final statistics
1017
- final_stats = self.context.get_stats()
1018
- logger.info(
1019
- f"All crawlers gracefully shut down 👋\n"
1020
- f" - Total crawlers: {final_stats['total_crawlers']}\n"
1021
- f" - Successfully completed: {final_stats['completed_crawlers']}\n"
1022
- f" - Failed: {final_stats['failed_crawlers']}\n"
1023
- f" - Success rate: {final_stats['success_rate']:.1f}%\n"
1024
- f" - Total runtime: {final_stats['duration_seconds']} seconds"
1025
- )
1026
-
1027
- except Exception as e:
1028
- logger.error(f"Error during shutdown process: {e}", exc_info=True)
1029
-
1030
- @classmethod
1031
- def _get_default_settings(cls) -> SettingManager:
1032
- """
1033
- Load default configuration
1034
-
1035
- Provides better error handling and fallback strategy
1036
- """
1037
- try:
1038
- settings = get_settings()
1039
- logger.debug("Default configuration loaded successfully")
1040
- return settings
1041
- except Exception as e:
1042
- logger.warning(f"Unable to load default configuration: {e}, using empty configuration")
1043
- return SettingManager()
1044
-
1045
-    def _log_startup_info(self):
-        """Print startup information, including run mode and key configuration checks"""
-        # Get run mode
-        run_mode = self.settings.get('RUN_MODE', 'standalone')
-
-        # Get version number
-        version = self.settings.get('VERSION', '1.0.0')
-        if not version or version == 'None':
-            version = '1.0.0'
-
-        # Print framework start info
-        logger.info(f"Crawlo Framework Started {version}")
-
-        # Add mode info if available
-        mode_info = self.settings.get('_mode_info')
-        if mode_info:
-            logger.info(mode_info)
-        else:
-            # If _mode_info is absent, log the default message
-            logger.info("Standalone mode - simple and fast, suitable for development and small-to-medium crawls")
-
-        # Get actual queue type
-        queue_type = self.settings.get('QUEUE_TYPE', 'memory')
-
-        # Display information based on run mode and queue type combination
-        if run_mode == 'distributed':
-            logger.info("Run Mode: distributed")
-            logger.info("Distributed Mode - Multi-node collaboration supported")
-            # Show Redis configuration
-            redis_host = self.settings.get('REDIS_HOST', 'localhost')
-            redis_port = self.settings.get('REDIS_PORT', 6379)
-            logger.info(f"Redis Address: {redis_host}:{redis_port}")
-        elif run_mode == 'standalone':
-            if queue_type == 'redis':
-                logger.info("Run Mode: standalone+redis")
-                # Show Redis configuration
-                redis_host = self.settings.get('REDIS_HOST', 'localhost')
-                redis_port = self.settings.get('REDIS_PORT', 6379)
-                logger.info(f"Redis Address: {redis_host}:{redis_port}")
-            elif queue_type == 'auto':
-                logger.info("Run Mode: standalone+auto")
-            else:  # memory
-                logger.info("Run Mode: standalone")
-        else:
-            logger.info(f"Run Mode: {run_mode}")
-
-
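The startup banner produced by this method is driven entirely by settings keys. For orientation, an illustrative set of values covering the two branches above (only the key names come from the code; the values are examples):

```python
# Key names are the ones read by _log_startup_info above; values are illustrative.
STANDALONE_SETTINGS = {
    'RUN_MODE': 'standalone',   # logged as "Run Mode: standalone"
    'QUEUE_TYPE': 'memory',     # 'memory', 'redis' or 'auto'
}

DISTRIBUTED_SETTINGS = {
    'RUN_MODE': 'distributed',  # also triggers the "Redis Address: host:port" line
    'QUEUE_TYPE': 'redis',
    'REDIS_HOST': '127.0.0.1',
    'REDIS_PORT': 6379,
}
```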
-# === Utility functions ===
-
-def create_crawler_with_optimizations(
-    spider_cls: Type[Spider],
-    settings: Optional[SettingManager] = None,
-    **optimization_kwargs
-) -> Crawler:
-    """
-    Create an optimized crawler instance
-
-    :param spider_cls: Spider class
-    :param settings: Settings manager
-    :param optimization_kwargs: Optimization parameters
-    :return: Crawler instance
-    """
-    if settings is None:
-        settings = SettingManager()
-
-    # Apply optimization configuration
-    for key, value in optimization_kwargs.items():
-        settings.set(key, value)
-
-    context = CrawlerContext()
-    return Crawler(spider_cls, settings, context)
-
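As a usage note for the helper removed above (present in 1.3.2, gone in 1.3.4), the keyword arguments were applied directly as settings. The call below is a hypothetical sketch: the import path, spider class and setting names are illustrative, not guaranteed crawlo settings.

```python
# Hypothetical 1.3.2-era usage; NewsSpider and the setting names are illustrative.
from crawlo.crawler import create_crawler_with_optimizations
from myproject.spiders.news import NewsSpider

crawler = create_crawler_with_optimizations(
    NewsSpider,
    CONCURRENCY=32,        # example optimization setting
    DOWNLOAD_DELAY=0.5,    # example optimization setting
)
```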
-
1118
- def create_process_with_large_scale_config(
1119
- config_type: str = 'balanced',
1120
- concurrency: int = 16,
1121
- **kwargs
1122
- ) -> CrawlerProcess:
1123
- """
1124
- Create a process manager that supports large-scale optimization
1125
-
1126
- :param config_type: Configuration type ('conservative', 'balanced', 'aggressive', 'memory_optimized')
1127
- :param concurrency: Concurrency count
1128
- :param kwargs: Other parameters
1129
- :return: Process manager
1130
- """
1131
- try:
1132
- from crawlo.utils.large_scale_config import LargeScaleConfig
1133
-
1134
- # Get optimization configuration
1135
- config_methods = {
1136
- 'conservative': LargeScaleConfig.conservative_config,
1137
- 'balanced': LargeScaleConfig.balanced_config,
1138
- 'aggressive': LargeScaleConfig.aggressive_config,
1139
- 'memory_optimized': LargeScaleConfig.memory_optimized_config
1140
- }
1141
-
1142
- if config_type not in config_methods:
1143
- logger.warning(f"Unknown configuration type: {config_type}, using default configuration")
1144
- settings = SettingManager()
543
+            from crawlo.spider import get_global_spider_registry
+            registry = get_global_spider_registry()
+            if spider_cls_or_name in registry:
+                return registry[spider_cls_or_name]
+            else:
+                # If not found in the registry, import all spider_modules to trigger registration,
+                # then check the registry again
+                if hasattr(self, '_spider_modules') and self._spider_modules:
+                    for module_path in self._spider_modules:
+                        try:
+                            # Import the module to trigger spider registration
+                            __import__(module_path)
+                        except ImportError:
+                            pass  # Ignore import errors
+
+                # Check the registry again
+                if spider_cls_or_name in registry:
+                    return registry[spider_cls_or_name]
+
+                # If still not found, try the auto-discovery path
+                if hasattr(self, '_spider_modules') and self._spider_modules:
+                    self._auto_discover_spider_modules(self._spider_modules)
+                    if spider_cls_or_name in registry:
+                        return registry[spider_cls_or_name]
+
+                # If still not found, try importing the module directly
+                try:
+                    # Assume the format is module.SpiderClass
+                    if '.' in spider_cls_or_name:
+                        module_path, class_name = spider_cls_or_name.rsplit('.', 1)
+                        module = __import__(module_path, fromlist=[class_name])
+                        spider_class = getattr(module, class_name)
+                        # Register in the global registry
+                        registry[spider_class.name] = spider_class
+                        return spider_class
+                    else:
+                        # Try to find it under spider_modules
+                        if hasattr(self, '_spider_modules') and self._spider_modules:
+                            for module_path in self._spider_modules:
+                                try:
+                                    # Build the full module path
+                                    full_module_path = f"{module_path}.{spider_cls_or_name}"
+                                    module = __import__(full_module_path, fromlist=[spider_cls_or_name])
+                                    # Look for Spider subclasses in the module
+                                    for attr_name in dir(module):
+                                        attr_value = getattr(module, attr_name)
+                                        if (isinstance(attr_value, type) and
+                                                issubclass(attr_value, registry.__class__.__bases__[0]) and
+                                                hasattr(attr_value, 'name') and
+                                                attr_value.name == spider_cls_or_name):
+                                            # Register in the global registry
+                                            registry[spider_cls_or_name] = attr_value
+                                            return attr_value
+                                except ImportError:
+                                    continue
+                        raise ValueError(f"Spider '{spider_cls_or_name}' not found in registry")
+                except (ImportError, AttributeError):
+                    raise ValueError(f"Spider '{spider_cls_or_name}' not found in registry")
+                except ImportError:
+                    raise ValueError(f"Cannot resolve spider name '{spider_cls_or_name}'")
        else:
-            config = config_methods[config_type](concurrency)
-            settings = SettingManager()
-            settings.update(config)
-
-        return CrawlerProcess(
-            settings=settings,
-            max_concurrency=concurrency,
-            **kwargs
-        )
-
-    except ImportError:
-        logger.warning("Large-scale configuration module does not exist, using default configuration")
-        return CrawlerProcess(max_concurrency=concurrency, **kwargs)
-
-
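Correspondingly, the large-scale helper removed here was called with a profile name and a concurrency figure; a hypothetical 1.3.2-era invocation (the profile strings come from the dictionary above, the import path is assumed):

```python
# Hypothetical 1.3.2-era usage of the helper removed above (no longer present in 1.3.4).
from crawlo.crawler import create_process_with_large_scale_config

process = create_process_with_large_scale_config(
    config_type='aggressive',   # 'conservative', 'balanced', 'aggressive' or 'memory_optimized'
    concurrency=32,
)
```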
-# === Exported interfaces ===
-
-__all__ = [
-    'Crawler',
-    'CrawlerProcess',
-    'CrawlerContext',
-    'create_crawler_with_optimizations',
-    'create_process_with_large_scale_config'
-]
+            return spider_cls_or_name
+
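The added resolution logic above boils down to: use the argument as-is if it is already a class, otherwise try the global registry, import the configured spider modules so they self-register, and finally fall back to a dotted import path. A compact, framework-independent sketch of that order (`SPIDER_REGISTRY` and `resolve_spider` are illustrative names, not crawlo API):

```python
import importlib
from typing import Dict, Iterable

SPIDER_REGISTRY: Dict[str, type] = {}  # stand-in for crawlo's global spider registry

def resolve_spider(name_or_cls, spider_modules: Iterable[str] = ()) -> type:
    """Resolve a spider class from a name, importing modules to trigger registration."""
    if not isinstance(name_or_cls, str):
        return name_or_cls                       # already a class: use it directly
    if name_or_cls in SPIDER_REGISTRY:
        return SPIDER_REGISTRY[name_or_cls]      # fast path: already registered
    for module_path in spider_modules:           # importing registers spiders as a side effect
        try:
            importlib.import_module(module_path)
        except ImportError:
            continue
    if name_or_cls in SPIDER_REGISTRY:
        return SPIDER_REGISTRY[name_or_cls]
    if '.' in name_or_cls:                       # last resort: treat the name as a dotted path
        module_path, class_name = name_or_cls.rsplit('.', 1)
        return getattr(importlib.import_module(module_path), class_name)
    raise ValueError(f"Spider '{name_or_cls}' not found in registry")
```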
+    def _merge_settings(self, additional_settings):
+        """Merge settings"""
+        if not additional_settings:
+            return self._settings
+
+        # More sophisticated merge logic could be implemented here
+        from crawlo.settings.setting_manager import SettingManager
+        merged = SettingManager()
+
+        # Copy the base settings
+        if self._settings:
+            merged.update_attributes(self._settings.__dict__)
+
+        # Apply the additional settings
+        merged.update_attributes(additional_settings)
+
+        return merged
+
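_merge_settings copies the base settings first and applies the extra ones second, so the extra values win on key collisions. The same precedence shown with plain dicts (illustrative keys, not the SettingManager API):

```python
# Same merge order as _merge_settings above, shown with plain dicts:
# base first, additional second, so the additional values override.
base = {'CONCURRENCY': 8, 'QUEUE_TYPE': 'memory'}
extra = {'QUEUE_TYPE': 'redis'}

merged = {**base, **extra}
assert merged == {'CONCURRENCY': 8, 'QUEUE_TYPE': 'redis'}
```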
+    def get_metrics(self) -> Dict[str, Any]:
+        """Get overall metrics"""
+        total_duration = 0.0
+        if self._start_time and self._end_time:
+            total_duration = self._end_time - self._start_time
+
+        crawler_metrics = [crawler.metrics for crawler in self._crawlers]
+
+        return {
+            'total_duration': total_duration,
+            'crawler_count': len(self._crawlers),
+            'total_requests': sum(m.request_count for m in crawler_metrics),
+            'total_success': sum(m.success_count for m in crawler_metrics),
+            'total_errors': sum(m.error_count for m in crawler_metrics),
+            'average_success_rate': sum(m.get_success_rate() for m in crawler_metrics) / len(crawler_metrics) if crawler_metrics else 0.0
+        }
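The dictionary returned by get_metrics has a fixed set of keys, so it can be rendered directly; a small sketch with illustrative numbers (`format_metrics` is not part of crawlo):

```python
from typing import Any, Dict

def format_metrics(metrics: Dict[str, Any]) -> str:
    """Render the dictionary shape returned by get_metrics above as a one-line summary."""
    return (
        f"{metrics['crawler_count']} crawler(s), "
        f"{metrics['total_requests']} request(s), "
        f"{metrics['total_success']} ok / {metrics['total_errors']} failed, "
        f"ran {metrics['total_duration']:.1f}s"
    )

# Example with illustrative numbers:
print(format_metrics({
    'total_duration': 12.3, 'crawler_count': 2, 'total_requests': 150,
    'total_success': 147, 'total_errors': 3, 'average_success_rate': 0.98,
}))
```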