crawlo 1.3.1-py3-none-any.whl → 1.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic; see the package's registry page for more details.

Files changed (219)
  1. crawlo/__init__.py +63 -63
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +75 -75
  4. crawlo/commands/__init__.py +14 -14
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/help.py +138 -138
  8. crawlo/commands/list.py +155 -155
  9. crawlo/commands/run.py +322 -314
  10. crawlo/commands/startproject.py +436 -436
  11. crawlo/commands/stats.py +187 -187
  12. crawlo/commands/utils.py +196 -196
  13. crawlo/config.py +312 -312
  14. crawlo/config_validator.py +277 -277
  15. crawlo/core/__init__.py +2 -2
  16. crawlo/core/engine.py +365 -365
  17. crawlo/core/processor.py +40 -40
  18. crawlo/core/scheduler.py +256 -256
  19. crawlo/crawler.py +1166 -1168
  20. crawlo/data/__init__.py +5 -5
  21. crawlo/data/user_agents.py +194 -194
  22. crawlo/downloader/__init__.py +273 -273
  23. crawlo/downloader/aiohttp_downloader.py +226 -226
  24. crawlo/downloader/cffi_downloader.py +245 -245
  25. crawlo/downloader/httpx_downloader.py +259 -259
  26. crawlo/downloader/hybrid_downloader.py +212 -212
  27. crawlo/downloader/playwright_downloader.py +402 -402
  28. crawlo/downloader/selenium_downloader.py +472 -472
  29. crawlo/event.py +11 -11
  30. crawlo/exceptions.py +81 -81
  31. crawlo/extension/__init__.py +39 -39
  32. crawlo/extension/health_check.py +141 -141
  33. crawlo/extension/log_interval.py +57 -57
  34. crawlo/extension/log_stats.py +81 -81
  35. crawlo/extension/logging_extension.py +52 -45
  36. crawlo/extension/memory_monitor.py +104 -104
  37. crawlo/extension/performance_profiler.py +133 -133
  38. crawlo/extension/request_recorder.py +107 -107
  39. crawlo/filters/__init__.py +154 -154
  40. crawlo/filters/aioredis_filter.py +234 -234
  41. crawlo/filters/memory_filter.py +269 -269
  42. crawlo/items/__init__.py +23 -23
  43. crawlo/items/base.py +21 -21
  44. crawlo/items/fields.py +52 -52
  45. crawlo/items/items.py +104 -104
  46. crawlo/middleware/__init__.py +21 -21
  47. crawlo/middleware/default_header.py +132 -132
  48. crawlo/middleware/download_delay.py +104 -104
  49. crawlo/middleware/middleware_manager.py +135 -135
  50. crawlo/middleware/offsite.py +123 -115
  51. crawlo/middleware/proxy.py +386 -386
  52. crawlo/middleware/request_ignore.py +86 -86
  53. crawlo/middleware/response_code.py +163 -163
  54. crawlo/middleware/response_filter.py +136 -136
  55. crawlo/middleware/retry.py +124 -124
  56. crawlo/middleware/simple_proxy.py +65 -65
  57. crawlo/mode_manager.py +187 -148
  58. crawlo/network/__init__.py +21 -21
  59. crawlo/network/request.py +379 -379
  60. crawlo/network/response.py +359 -359
  61. crawlo/pipelines/__init__.py +21 -21
  62. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  63. crawlo/pipelines/console_pipeline.py +39 -39
  64. crawlo/pipelines/csv_pipeline.py +316 -316
  65. crawlo/pipelines/database_dedup_pipeline.py +222 -222
  66. crawlo/pipelines/json_pipeline.py +218 -218
  67. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  68. crawlo/pipelines/mongo_pipeline.py +131 -131
  69. crawlo/pipelines/mysql_pipeline.py +318 -318
  70. crawlo/pipelines/pipeline_manager.py +75 -75
  71. crawlo/pipelines/redis_dedup_pipeline.py +166 -166
  72. crawlo/project.py +325 -297
  73. crawlo/queue/pqueue.py +37 -37
  74. crawlo/queue/queue_manager.py +379 -379
  75. crawlo/queue/redis_priority_queue.py +306 -306
  76. crawlo/settings/__init__.py +7 -7
  77. crawlo/settings/default_settings.py +225 -225
  78. crawlo/settings/setting_manager.py +198 -198
  79. crawlo/spider/__init__.py +639 -639
  80. crawlo/stats_collector.py +59 -59
  81. crawlo/subscriber.py +129 -129
  82. crawlo/task_manager.py +30 -30
  83. crawlo/templates/crawlo.cfg.tmpl +10 -10
  84. crawlo/templates/project/__init__.py.tmpl +3 -3
  85. crawlo/templates/project/items.py.tmpl +17 -17
  86. crawlo/templates/project/middlewares.py.tmpl +118 -118
  87. crawlo/templates/project/pipelines.py.tmpl +96 -96
  88. crawlo/templates/project/settings.py.tmpl +266 -261
  89. crawlo/templates/project/settings_distributed.py.tmpl +179 -174
  90. crawlo/templates/project/settings_gentle.py.tmpl +60 -95
  91. crawlo/templates/project/settings_high_performance.py.tmpl +130 -125
  92. crawlo/templates/project/settings_minimal.py.tmpl +34 -29
  93. crawlo/templates/project/settings_simple.py.tmpl +101 -96
  94. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  95. crawlo/templates/run.py.tmpl +38 -47
  96. crawlo/templates/spider/spider.py.tmpl +143 -143
  97. crawlo/tools/__init__.py +200 -200
  98. crawlo/tools/anti_crawler.py +268 -268
  99. crawlo/tools/authenticated_proxy.py +240 -240
  100. crawlo/tools/data_formatter.py +225 -225
  101. crawlo/tools/data_validator.py +180 -180
  102. crawlo/tools/date_tools.py +289 -289
  103. crawlo/tools/distributed_coordinator.py +388 -388
  104. crawlo/tools/encoding_converter.py +127 -127
  105. crawlo/tools/request_tools.py +82 -82
  106. crawlo/tools/retry_mechanism.py +224 -224
  107. crawlo/tools/scenario_adapter.py +262 -262
  108. crawlo/tools/text_cleaner.py +232 -232
  109. crawlo/utils/__init__.py +34 -34
  110. crawlo/utils/batch_processor.py +259 -259
  111. crawlo/utils/controlled_spider_mixin.py +439 -439
  112. crawlo/utils/db_helper.py +343 -343
  113. crawlo/utils/enhanced_error_handler.py +356 -356
  114. crawlo/utils/env_config.py +142 -142
  115. crawlo/utils/error_handler.py +123 -123
  116. crawlo/utils/func_tools.py +82 -82
  117. crawlo/utils/large_scale_config.py +286 -286
  118. crawlo/utils/large_scale_helper.py +344 -344
  119. crawlo/utils/log.py +199 -146
  120. crawlo/utils/performance_monitor.py +285 -285
  121. crawlo/utils/queue_helper.py +175 -175
  122. crawlo/utils/redis_connection_pool.py +351 -351
  123. crawlo/utils/redis_key_validator.py +198 -198
  124. crawlo/utils/request.py +267 -267
  125. crawlo/utils/request_serializer.py +218 -218
  126. crawlo/utils/spider_loader.py +61 -61
  127. crawlo/utils/system.py +11 -11
  128. crawlo/utils/tools.py +4 -4
  129. crawlo/utils/url.py +39 -39
  130. {crawlo-1.3.1.dist-info → crawlo-1.3.3.dist-info}/METADATA +1020 -1020
  131. crawlo-1.3.3.dist-info/RECORD +219 -0
  132. examples/__init__.py +7 -7
  133. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +81 -81
  134. tests/__init__.py +7 -7
  135. tests/advanced_tools_example.py +275 -275
  136. tests/authenticated_proxy_example.py +107 -107
  137. tests/cleaners_example.py +160 -160
  138. tests/config_validation_demo.py +142 -142
  139. tests/controlled_spider_example.py +205 -205
  140. tests/date_tools_example.py +180 -180
  141. tests/debug_pipelines.py +66 -66
  142. tests/dynamic_loading_example.py +523 -523
  143. tests/dynamic_loading_test.py +104 -104
  144. tests/env_config_example.py +133 -133
  145. tests/error_handling_example.py +171 -171
  146. tests/redis_key_validation_demo.py +130 -130
  147. tests/request_params_example.py +150 -150
  148. tests/response_improvements_example.py +144 -144
  149. tests/test_advanced_tools.py +148 -148
  150. tests/test_all_redis_key_configs.py +145 -145
  151. tests/test_authenticated_proxy.py +141 -141
  152. tests/test_cleaners.py +54 -54
  153. tests/test_comprehensive.py +146 -146
  154. tests/test_config_consistency.py +80 -80
  155. tests/test_config_merge.py +152 -152
  156. tests/test_config_validator.py +182 -182
  157. tests/test_crawlo_proxy_integration.py +108 -108
  158. tests/test_date_tools.py +123 -123
  159. tests/test_default_header_middleware.py +158 -158
  160. tests/test_distributed.py +65 -65
  161. tests/test_double_crawlo_fix.py +207 -207
  162. tests/test_double_crawlo_fix_simple.py +124 -124
  163. tests/test_download_delay_middleware.py +221 -221
  164. tests/test_downloader_proxy_compatibility.py +268 -268
  165. tests/test_dynamic_downloaders_proxy.py +124 -124
  166. tests/test_dynamic_proxy.py +92 -92
  167. tests/test_dynamic_proxy_config.py +146 -146
  168. tests/test_dynamic_proxy_real.py +109 -109
  169. tests/test_edge_cases.py +303 -303
  170. tests/test_enhanced_error_handler.py +270 -270
  171. tests/test_env_config.py +121 -121
  172. tests/test_error_handler_compatibility.py +112 -112
  173. tests/test_final_validation.py +153 -153
  174. tests/test_framework_env_usage.py +103 -103
  175. tests/test_integration.py +169 -169
  176. tests/test_item_dedup_redis_key.py +122 -122
  177. tests/test_mode_consistency.py +51 -51
  178. tests/test_offsite_middleware.py +221 -221
  179. tests/test_parsel.py +29 -29
  180. tests/test_performance.py +327 -327
  181. tests/test_proxy_api.py +264 -264
  182. tests/test_proxy_health_check.py +32 -32
  183. tests/test_proxy_middleware.py +121 -121
  184. tests/test_proxy_middleware_enhanced.py +216 -216
  185. tests/test_proxy_middleware_integration.py +136 -136
  186. tests/test_proxy_middleware_refactored.py +184 -184
  187. tests/test_proxy_providers.py +56 -56
  188. tests/test_proxy_stats.py +19 -19
  189. tests/test_proxy_strategies.py +59 -59
  190. tests/test_queue_manager_double_crawlo.py +173 -173
  191. tests/test_queue_manager_redis_key.py +176 -176
  192. tests/test_random_user_agent.py +72 -72
  193. tests/test_real_scenario_proxy.py +195 -195
  194. tests/test_redis_config.py +28 -28
  195. tests/test_redis_connection_pool.py +294 -294
  196. tests/test_redis_key_naming.py +181 -181
  197. tests/test_redis_key_validator.py +123 -123
  198. tests/test_redis_queue.py +224 -224
  199. tests/test_request_ignore_middleware.py +182 -182
  200. tests/test_request_params.py +111 -111
  201. tests/test_request_serialization.py +70 -70
  202. tests/test_response_code_middleware.py +349 -349
  203. tests/test_response_filter_middleware.py +427 -427
  204. tests/test_response_improvements.py +152 -152
  205. tests/test_retry_middleware.py +241 -241
  206. tests/test_scheduler.py +252 -252
  207. tests/test_scheduler_config_update.py +133 -133
  208. tests/test_simple_response.py +61 -61
  209. tests/test_telecom_spider_redis_key.py +205 -205
  210. tests/test_template_content.py +87 -87
  211. tests/test_template_redis_key.py +134 -134
  212. tests/test_tools.py +159 -159
  213. tests/test_user_agents.py +96 -96
  214. tests/tools_example.py +260 -260
  215. tests/verify_distributed.py +117 -117
  216. crawlo-1.3.1.dist-info/RECORD +0 -219
  217. {crawlo-1.3.1.dist-info → crawlo-1.3.3.dist-info}/WHEEL +0 -0
  218. {crawlo-1.3.1.dist-info → crawlo-1.3.3.dist-info}/entry_points.txt +0 -0
  219. {crawlo-1.3.1.dist-info → crawlo-1.3.3.dist-info}/top_level.txt +0 -0
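
The bulk of the diff below is crawlo/crawler.py. Its module docstring names the two public entry points, Crawler and CrawlerProcess, and the validation in Crawler._create_spider spells out what a Spider subclass must provide (a non-empty name, a callable start_requests, list-typed start_urls, ideally a parse callback). The following minimal sketch is assembled from that docstring and those messages; the spider class, the example URL, the crawlo.network import path, and the callback keyword are illustrative assumptions, not something this diff confirms.

    # Minimal usage sketch based on the crawler.py module docstring shown below.
    # MySpider, the URL, the crawlo.network import, and the callback kwarg are assumptions.
    import asyncio

    from crawlo.crawler import CrawlerProcess
    from crawlo.network import Request        # assumed re-export of crawlo/network/request.py
    from crawlo.project import get_settings
    from crawlo.spider import Spider


    class MySpider(Spider):
        name = "my_spider"                    # required: _create_spider raises without it

        def start_requests(self):
            # must be callable; pattern taken from the _create_spider error message
            yield Request(url="https://example.com", callback=self.parse)

        def parse(self, response):
            # recommended: requests without a callback have their responses ignored
            ...


    async def main():
        settings = get_settings()             # project settings; CrawlerProcess falls back to these
        process = CrawlerProcess(settings)    # concurrency-limited multi-crawler scheduler
        await process.crawl(MySpider)         # also accepts a registered name string or a list


    if __name__ == "__main__":
        asyncio.run(main())
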
crawlo/crawler.py CHANGED
@@ -1,1169 +1,1167 @@
1
- #!/usr/bin/python
2
- # -*- coding: UTF-8 -*-
3
- """
4
- Crawlo Crawler Module
5
- ====================
6
- Provides crawler process management and runtime core functionality.
7
-
8
- Core Components:
9
- - Crawler: Single crawler runtime instance, managing Spider and engine lifecycle
10
- - CrawlerProcess: Crawler process manager, supporting multi-crawler concurrent scheduling and resource management
11
-
12
- Features:
13
- - Intelligent concurrency control and resource management
14
- - Graceful shutdown and signal handling
15
- - Statistics monitoring and performance tracking
16
- - Automatic module discovery and registration
17
- - Error recovery and retry mechanism
18
- - Large-scale crawler optimization support
19
-
20
- Example Usage:
21
- # Single crawler run
22
- crawler = Crawler(MySpider, settings)
23
- await crawler.crawl()
24
-
25
- # Multi-crawler concurrent management
26
- process = CrawlerProcess()
27
- await process.crawl([Spider1, Spider2])
28
- """
29
- from __future__ import annotations
30
- import asyncio
31
- import signal
32
- import time
33
- import threading
34
- from typing import Type, Optional, Set, List, Union, Dict, Any
35
- from .spider import Spider, get_global_spider_registry
36
- from .core.engine import Engine
37
- from .subscriber import Subscriber
38
- from .extension import ExtensionManager
39
- from crawlo.utils.log import get_logger
40
- from .stats_collector import StatsCollector
41
- from .event import spider_opened, spider_closed
42
- from .settings.setting_manager import SettingManager
43
- from crawlo.project import merge_settings, get_settings
44
-
45
- logger = get_logger(__name__)
46
-
47
-
48
- class CrawlerContext:
49
- """
50
- Crawler context manager
51
- Provides shared state and resource management
52
- """
53
-
54
- def __init__(self):
55
- self.start_time = time.time()
56
- self.total_crawlers = 0
57
- self.active_crawlers = 0
58
- self.completed_crawlers = 0
59
- self.failed_crawlers = 0
60
- self.error_log = []
61
- self._lock = threading.RLock()
62
-
63
- def increment_total(self):
64
- with self._lock:
65
- self.total_crawlers += 1
66
-
67
- def increment_active(self):
68
- with self._lock:
69
- self.active_crawlers += 1
70
-
71
- def decrement_active(self):
72
- with self._lock:
73
- self.active_crawlers -= 1
74
-
75
- def increment_completed(self):
76
- with self._lock:
77
- self.completed_crawlers += 1
78
-
79
- def increment_failed(self, error: str):
80
- with self._lock:
81
- self.failed_crawlers += 1
82
- self.error_log.append({
83
- 'timestamp': time.time(),
84
- 'error': error
85
- })
86
-
87
- def get_stats(self) -> Dict[str, Any]:
88
- with self._lock:
89
- duration = time.time() - self.start_time
90
- return {
91
- 'total_crawlers': self.total_crawlers,
92
- 'active_crawlers': self.active_crawlers,
93
- 'completed_crawlers': self.completed_crawlers,
94
- 'failed_crawlers': self.failed_crawlers,
95
- 'success_rate': (self.completed_crawlers / max(1, self.total_crawlers)) * 100,
96
- 'duration_seconds': round(duration, 2),
97
- 'error_count': len(self.error_log)
98
- }
99
-
100
-
101
- class Crawler:
102
- """
103
- Single crawler runtime instance, managing Spider and engine lifecycle
104
-
105
- Provides functionality:
106
- - Spider lifecycle management (initialization, running, closing)
107
- - Engine component coordination management
108
- - Configuration merging and validation
109
- - Statistics data collection
110
- - Extension management
111
- - Exception handling and cleanup
112
- """
113
-
114
- def __init__(
115
- self,
116
- spider_cls: Type[Spider],
117
- settings: SettingManager,
118
- context: Optional[CrawlerContext] = None
119
- ):
120
- self.spider_cls = spider_cls
121
- self.spider: Optional[Spider] = None
122
- self.engine: Optional[Engine] = None
123
- self.stats: Optional[StatsCollector] = None
124
- self.subscriber: Optional[Subscriber] = None
125
- self.extension: Optional[ExtensionManager] = None
126
- self.settings: SettingManager = settings.copy()
127
- self.context = context or CrawlerContext()
128
-
129
- # State management
130
- self._closed = False
131
- self._close_lock = asyncio.Lock()
132
- self._start_time = None
133
- self._end_time = None
134
-
135
- # Performance monitoring
136
- self._performance_metrics = {
137
- 'initialization_time': 0,
138
- 'crawl_duration': 0,
139
- 'memory_peak': 0,
140
- 'request_count': 0,
141
- 'error_count': 0
142
- }
143
-
144
- # Initialize components
145
- self.subscriber = self._create_subscriber()
146
- self.spider = self._create_spider()
147
- self.engine = self._create_engine()
148
- self.stats = self._create_stats()
149
- # Note: Do not initialize extension manager here, let it initialize in the engine
150
-
151
- # Validate crawler state
152
- self._validate_crawler_state()
153
-
154
- # Print the startup info here so it is emitted after the logging system has been configured
155
- self._log_startup_info()
156
-
157
- # Log the spider-start message here so it is emitted after the logging system has been configured
158
- logger.info(f"Starting running {self.spider.name}")
159
-
160
- async def crawl(self):
161
- """
162
- Start the crawler core process
163
-
164
- Includes the following stages:
165
- 1. Initialization stage: Create all components
166
- 2. Validation stage: Check configuration and state
167
- 3. Running stage: Start the crawler engine
168
- 4. Cleanup stage: Resource release
169
- """
170
- init_start = time.time()
171
- self._start_time = init_start
172
-
173
- try:
174
- # Update context status
175
- self.context.increment_active()
176
-
177
- # Phase 1: Initialize components
178
- # Adjust component initialization order to ensure log output order meets requirements
179
- self.subscriber = self._create_subscriber()
180
- self.spider = self._create_spider()
181
- self.engine = self._create_engine()
182
- self.stats = self._create_stats()
183
- # Note: Do not initialize extension manager here, let it initialize in the engine
184
-
185
- # Record initialization time
186
- self._performance_metrics['initialization_time'] = time.time() - init_start
187
-
188
- # Phase 2: Validate state
189
- self._validate_crawler_state()
190
-
191
- # Phase 3: Display runtime configuration summary
192
- self._log_runtime_summary()
193
-
194
- # Phase 4: Start crawler
195
- crawl_start = time.time()
196
- await self.engine.start_spider(self.spider)
197
-
198
- # Record crawl time
199
- self._performance_metrics['crawl_duration'] = time.time() - crawl_start
200
- self._end_time = time.time()
201
-
202
- # Update context status
203
- self.context.increment_completed()
204
-
205
- logger.info(f"Spider {self.spider.name} completed, took {self._get_total_duration():.2f} seconds")
206
-
207
- except Exception as e:
208
- self._performance_metrics['error_count'] += 1
209
- self.context.increment_failed(str(e))
210
- logger.error(f"Spider {getattr(self.spider, 'name', 'Unknown')} failed to run: {e}", exc_info=True)
211
- raise
212
- finally:
213
- self.context.decrement_active()
214
- # Ensure resource cleanup
215
- await self._ensure_cleanup()
216
-
217
- def _log_runtime_summary(self):
218
- """Log runtime configuration summary"""
219
- # Get spider name
220
- spider_name = getattr(self.spider, 'name', 'Unknown')
221
-
222
- # Ensure spider name is a string and strip leading/trailing whitespace
223
- if spider_name:
224
- spider_name = str(spider_name).strip()
225
- else:
226
- spider_name = 'Unknown'
227
-
228
- logger.info(f"Starting running {spider_name}")
229
-
230
- def _validate_crawler_state(self):
231
- """
232
- Validate crawler state and configuration
233
- Ensure all necessary components are properly initialized
234
- """
235
- if not self.spider:
236
- raise RuntimeError("Spider instance not initialized")
237
- if not self.engine:
238
- raise RuntimeError("Engine not initialized")
239
- if not self.stats:
240
- raise RuntimeError("Stats collector not initialized")
241
- if not self.subscriber:
242
- raise RuntimeError("Event subscriber not initialized")
243
-
244
- # Check key configuration
245
- if not self.spider.name:
246
- raise ValueError("Spider name cannot be empty")
247
-
248
- logger.debug(f"Spider {self.spider.name} state validation passed")
249
-
250
- def _get_total_duration(self) -> float:
251
- """Get total runtime"""
252
- if self._start_time and self._end_time:
253
- return self._end_time - self._start_time
254
- return 0.0
255
-
256
- def _log_startup_info(self):
257
- """Print startup information, including run mode and key configuration checks"""
258
- # Get run mode
259
- run_mode = self.settings.get('RUN_MODE', 'standalone')
260
-
261
- # Get version number
262
- version = self.settings.get('VERSION', '1.0.0')
263
- if not version or version == 'None':
264
- version = '1.0.0'
265
-
266
- # Print framework start info
267
- logger.info(f"Crawlo Framework Started {version}")
268
-
269
- # Add mode info if available
270
- mode_info = self.settings.get('_mode_info')
271
- if mode_info:
272
- logger.info(mode_info)
273
- else:
274
- # If _mode_info is not set, log the default message
275
- logger.info("使用单机模式 - 简单快速,适合开发和中小规模爬取")
276
-
277
- # Get actual queue type
278
- queue_type = self.settings.get('QUEUE_TYPE', 'memory')
279
-
280
- # Display information based on run mode and queue type combination
281
- if run_mode == 'distributed':
282
- logger.info("Run Mode: distributed")
283
- logger.info("Distributed Mode - Multi-node collaboration supported")
284
- # Show Redis configuration
285
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
286
- redis_port = self.settings.get('REDIS_PORT', 6379)
287
- logger.info(f"Redis Address: {redis_host}:{redis_port}")
288
- elif run_mode == 'standalone':
289
- if queue_type == 'redis':
290
- logger.info("Run Mode: standalone+redis")
291
- # Show Redis configuration
292
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
293
- redis_port = self.settings.get('REDIS_PORT', 6379)
294
- logger.info(f"Redis Address: {redis_host}:{redis_port}")
295
- elif queue_type == 'auto':
296
- logger.info("Run Mode: standalone+auto")
297
- else: # memory
298
- logger.info("Run Mode: standalone")
299
- else:
300
- logger.info(f"Run Mode: {run_mode}")
301
-
302
- async def _ensure_cleanup(self):
303
- """Ensure resource cleanup"""
304
- try:
305
- if not self._closed:
306
- await self.close()
307
- except Exception as e:
308
- logger.warning(f"Error cleaning up resources: {e}")
309
-
310
- def get_performance_metrics(self) -> Dict[str, Any]:
311
- """Get performance metrics"""
312
- metrics = self._performance_metrics.copy()
313
- metrics['total_duration'] = self._get_total_duration()
314
- if self.stats:
315
- # Add statistics data
316
- stats_data = getattr(self.stats, 'get_stats', lambda: {})()
317
- metrics.update(stats_data)
318
- return metrics
319
-
320
- @staticmethod
321
- def _create_subscriber() -> Subscriber:
322
- """Create event subscriber"""
323
- return Subscriber()
324
-
325
- def _create_spider(self) -> Spider:
326
- """
327
- Create and validate spider instance (enhanced version)
328
-
329
- Performs the following validations:
330
- - Spider name must exist
331
- - start_requests method must be callable
332
- - start_urls cannot be a string
333
- - parse method is recommended to exist
334
- """
335
- spider = self.spider_cls.create_instance(self)
336
-
337
- # Required attribute check
338
- if not getattr(spider, 'name', None):
339
- raise AttributeError(
340
- f"Spider class '{self.spider_cls.__name__}' must define 'name' attribute.\n"
341
- f"Example: name = 'my_spider'"
342
- )
343
-
344
- if not callable(getattr(spider, 'start_requests', None)):
345
- raise AttributeError(
346
- f"Spider '{spider.name}' must implement a callable 'start_requests' method.\n"
347
- f"Example: def start_requests(self): yield Request(url='...')"
348
- )
349
-
350
- # start_urls type check
351
- start_urls = getattr(spider, 'start_urls', [])
352
- if isinstance(start_urls, str):
353
- raise TypeError(
354
- f"Spider '{spider.name}' 'start_urls' must be a list or tuple, not a string.\n"
355
- f"Correct: start_urls = ['http://example.com']\n"
356
- f"Incorrect: start_urls = 'http://example.com'"
357
- )
358
-
359
- # parse method check (warning instead of error)
360
- if not callable(getattr(spider, 'parse', None)):
361
- logger.warning(
362
- f"Spider '{spider.name}' does not define 'parse' method.\n"
363
- f"Ensure all Requests specify a callback function, otherwise responses will be ignored."
364
- )
365
-
366
- # Set spider configuration
367
- self._set_spider(spider)
368
-
369
- logger.debug(f"Spider '{spider.name}' initialized successfully")
370
- return spider
371
-
372
- def _create_engine(self) -> Engine:
373
- """Create and initialize engine"""
374
- engine = Engine(self)
375
- engine.engine_start()
376
- logger.debug(f"Engine initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
377
- return engine
378
-
379
- def _create_stats(self) -> StatsCollector:
380
- """Create stats collector"""
381
- stats = StatsCollector(self)
382
- logger.debug(
383
- f"Stats collector initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
384
- return stats
385
-
386
- def _create_extension(self) -> ExtensionManager:
387
- """Create extension manager"""
388
- # Modify extension manager creation method, delay initialization until needed
389
- extension = ExtensionManager.create_instance(self)
390
- logger.debug(
391
- f"Extension manager initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
392
- return extension
393
-
394
- def _set_spider(self, spider: Spider):
395
- """
396
- Set spider configuration and event subscription
397
- Bind spider lifecycle events with subscriber
398
- """
399
- # Subscribe to spider lifecycle events
400
- self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
401
- self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
402
-
403
- # Merge spider custom configuration
404
- merge_settings(spider, self.settings)
405
-
406
- logger.debug(f"Spider '{spider.name}' configuration merged successfully")
407
-
408
- async def close(self, reason='finished') -> None:
409
- """
410
- Close crawler and clean up resources (enhanced version)
411
-
412
- Ensure closing only once and handle all cleanup operations
413
- """
414
- async with self._close_lock:
415
- if self._closed:
416
- return
417
-
418
- self._closed = True
419
- self._end_time = time.time()
420
-
421
- try:
422
- # Notify spider close event
423
- if self.subscriber:
424
- await self.subscriber.notify(spider_closed)
425
-
426
- # Statistics data collection
427
- if self.stats and self.spider:
428
- self.stats.close_spider(spider=self.spider, reason=reason)
429
- # Record statistics data
430
- try:
431
- from crawlo.commands.stats import record_stats
432
- record_stats(self)
433
- except ImportError:
434
- logger.debug("Statistics recording module does not exist, skipping statistics recording")
435
-
436
- logger.info(
437
- f"Spider '{getattr(self.spider, 'name', 'Unknown')}' closed, "
438
- f"reason: {reason}, took: {self._get_total_duration():.2f} seconds"
439
- )
440
-
441
- except Exception as e:
442
- logger.error(f"Error closing crawler: {e}", exc_info=True)
443
- finally:
444
- # Ensure resource cleanup
445
- await self._cleanup_resources()
446
-
447
- async def _cleanup_resources(self):
448
- """Clean up all resources"""
449
- cleanup_tasks = []
450
-
451
- # Engine cleanup
452
- if self.engine:
453
- try:
454
- cleanup_tasks.append(self.engine.close())
455
- except AttributeError:
456
- pass # Engine has no close method
457
-
458
- # Extension cleanup
459
- if self.extension:
460
- try:
461
- cleanup_tasks.append(self.extension.close())
462
- except AttributeError:
463
- pass
464
-
465
- # Stats collector cleanup
466
- if self.stats:
467
- try:
468
- cleanup_tasks.append(self.stats.close())
469
- except AttributeError:
470
- pass
471
-
472
- # Concurrently execute cleanup tasks
473
- if cleanup_tasks:
474
- await asyncio.gather(*cleanup_tasks, return_exceptions=True)
475
-
476
- logger.debug("Resource cleanup completed")
477
-
478
-
479
- class CrawlerProcess:
480
- """
481
- Crawler process manager
482
-
483
- Supported features:
484
- - Multi-crawler concurrent scheduling and resource management
485
- - Automatic module discovery and spider registration
486
- - Intelligent concurrency control and load balancing
487
- - Graceful shutdown and signal handling
488
- - Real-time status monitoring and statistics
489
- - Error recovery and retry mechanism
490
- - Large-scale crawler optimization support
491
-
492
- Usage example:
493
- # Basic usage
494
- process = CrawlerProcess()
495
- await process.crawl(MySpider)
496
-
497
- # Multi-crawler concurrency
498
- await process.crawl([Spider1, Spider2, 'spider_name'])
499
-
500
- # Custom concurrency
501
- process = CrawlerProcess(max_concurrency=8)
502
- """
503
-
504
- def __init__(
505
- self,
506
- settings: Optional[SettingManager] = None,
507
- max_concurrency: Optional[int] = None,
508
- spider_modules: Optional[List[str]] = None,
509
- enable_monitoring: bool = True
510
- ):
511
- # Basic configuration
512
- self.settings: SettingManager = settings or self._get_default_settings()
513
- self.crawlers: Set[Crawler] = set()
514
- self._active_tasks: Set[asyncio.Task] = set()
515
-
516
- # Context manager
517
- self.context = CrawlerContext()
518
-
519
- # Concurrency control configuration
520
- self.max_concurrency: int = (
521
- max_concurrency
522
- or self.settings.get('MAX_RUNNING_SPIDERS')
523
- or self.settings.get('CONCURRENCY', 3)
524
- )
525
- self.semaphore = asyncio.Semaphore(self.max_concurrency)
526
-
527
- # Monitoring configuration
528
- self.enable_monitoring = enable_monitoring
529
- self._monitoring_task = None
530
- self._shutdown_event = asyncio.Event()
531
-
532
- # Automatically discover and import spider modules
533
- if spider_modules:
534
- self.auto_discover(spider_modules)
535
-
536
- # Use snapshot of global registry (avoid subsequent import impact)
537
- self._spider_registry: Dict[str, Type[Spider]] = get_global_spider_registry()
538
-
539
- # Performance monitoring
540
- self._performance_stats = {
541
- 'total_requests': 0,
542
- 'successful_requests': 0,
543
- 'failed_requests': 0,
544
- 'memory_usage_mb': 0,
545
- 'cpu_usage_percent': 0
546
- }
547
-
548
- # Register signal handlers
549
- signal.signal(signal.SIGINT, self._shutdown)
550
- signal.signal(signal.SIGTERM, self._shutdown)
551
-
552
- # Note: the _log_startup_info() call was removed here because the logging system may not be configured yet.
554
- # Logging is configured in project.py's get_settings(), while CrawlerProcess is instantiated
555
- # before get_settings() returns, so calling _log_startup_info() at this point could cause
556
- # log messages not to be written to the log file correctly
556
-
557
- logger.debug(
558
- f"CrawlerProcess initialized successfully\n"
559
- f" - Max concurrent crawlers: {self.max_concurrency}\n"
560
- f" - Registered crawlers: {len(self._spider_registry)}\n"
561
- f" - Monitoring enabled: {self.enable_monitoring}"
562
- )
563
-
564
- async def start_monitoring(self):
565
- """Start monitoring task"""
566
- if not self.enable_monitoring:
567
- return
568
-
569
- self._monitoring_task = asyncio.create_task(self._monitor_loop())
570
- logger.debug("Monitoring task started")
571
-
572
- async def stop_monitoring(self):
573
- """Stop monitoring task"""
574
- if self._monitoring_task and not self._monitoring_task.done():
575
- self._monitoring_task.cancel()
576
- try:
577
- await self._monitoring_task
578
- except asyncio.CancelledError:
579
- pass
580
- logger.debug("Monitoring task stopped")
581
-
582
- async def _monitor_loop(self):
583
- """Monitoring loop, periodically collect and report status"""
584
- try:
585
- while not self._shutdown_event.is_set():
586
- await self._collect_performance_stats()
587
-
588
- # Output status every 30 seconds
589
- stats = self.context.get_stats()
590
- if stats['active_crawlers'] > 0:
591
- logger.debug(
592
- f"Crawler status: Active {stats['active_crawlers']}, "
593
- f"Completed {stats['completed_crawlers']}, "
594
- f"Failed {stats['failed_crawlers']}, "
595
- f"Success rate {stats['success_rate']:.1f}%"
596
- )
597
-
598
- await asyncio.sleep(30) # 30 second interval
599
-
600
- except asyncio.CancelledError:
601
- logger.debug("Monitoring loop cancelled")
602
- except Exception as e:
603
- logger.error(f"Monitoring loop error: {e}", exc_info=True)
604
-
605
- async def _collect_performance_stats(self):
606
- """Collect performance statistics data"""
607
- try:
608
- import psutil
609
- import os
610
-
611
- process = psutil.Process(os.getpid())
612
- memory_info = process.memory_info()
613
-
614
- self._performance_stats.update({
615
- 'memory_usage_mb': round(memory_info.rss / 1024 / 1024, 2),
616
- 'cpu_usage_percent': round(process.cpu_percent(), 2)
617
- })
618
-
619
- except ImportError:
620
- # Skip performance monitoring when psutil is not available
621
- pass
622
- except Exception as e:
623
- logger.debug(f"Failed to collect performance statistics: {e}")
624
-
625
- @staticmethod
626
- def auto_discover(modules: List[str]):
627
- """
628
- Automatically import modules, trigger Spider class definition and registration (enhanced version)
629
-
630
- Supports recursive scanning and error recovery
631
- """
632
- import importlib
633
- import pkgutil
634
-
635
- discovered_count = 0
636
- error_count = 0
637
-
638
- for module_name in modules:
639
- try:
640
- module = importlib.import_module(module_name)
641
-
642
- if hasattr(module, '__path__'):
643
- # Package module, recursive scanning
644
- for _, name, _ in pkgutil.walk_packages(module.__path__, module.__name__ + "."):
645
- try:
646
- importlib.import_module(name)
647
- discovered_count += 1
648
- except Exception as sub_e:
649
- error_count += 1
650
- logger.warning(f"Failed to import submodule {name}: {sub_e}")
651
- else:
652
- # Single module
653
- importlib.import_module(module_name)
654
- discovered_count += 1
655
-
656
- logger.debug(f"Module scanned: {module_name}")
657
-
658
- except Exception as e:
659
- error_count += 1
660
- logger.error(f"Failed to scan module {module_name}: {e}", exc_info=True)
661
-
662
- logger.debug(
663
- f"Spider registration completed: {discovered_count} succeeded, {error_count} failed"
664
- )
665
-
666
- # === Public read-only interface: Avoid direct access to _spider_registry ===
667
-
668
- def get_spider_names(self) -> List[str]:
669
- """Get all registered spider names"""
670
- return list(self._spider_registry.keys())
671
-
672
- def get_spider_class(self, name: str) -> Optional[Type[Spider]]:
673
- """Get spider class by name"""
674
- return self._spider_registry.get(name)
675
-
676
- def is_spider_registered(self, name: str) -> bool:
677
- """Check if a name is registered"""
678
- return name in self._spider_registry
679
-
680
- async def crawl(self, spiders: Union[Type[Spider], str, List[Union[Type[Spider], str]]]):
681
- """
682
- Start one or more crawlers
683
-
684
- Enhanced features:
685
- - Intelligent concurrency control
686
- - Real-time monitoring and statistics
687
- - Error recovery and retry
688
- - Graceful shutdown handling
689
- """
690
- # Phase 1: Preprocessing and validation
691
- spider_classes_to_run = self._resolve_spiders_to_run(spiders)
692
- total = len(spider_classes_to_run)
693
-
694
- if total == 0:
695
- raise ValueError("At least one spider class or name must be provided")
696
-
697
- # Phase 2: Initialize context and monitoring
698
- for _ in range(total):
699
- self.context.increment_total()
700
-
701
- # Start monitoring task
702
- await self.start_monitoring()
703
-
704
- try:
705
- # Phase 3: Initialize context and monitoring
706
- spider_classes_to_run.sort(key=lambda cls: cls.__name__.lower())
707
-
708
- logger.debug(
709
- f"Starting {total} crawlers\n"
710
- f" - Max concurrency: {self.max_concurrency}\n"
711
- f" - Spider list: {[cls.__name__ for cls in spider_classes_to_run]}"
712
- )
713
-
714
- # Phase 4: Stream start all crawler tasks
715
- tasks = [
716
- asyncio.create_task(
717
- self._run_spider_with_limit(spider_cls, index + 1, total),
718
- name=f"spider-{spider_cls.__name__}-{index + 1}"
719
- )
720
- for index, spider_cls in enumerate(spider_classes_to_run)
721
- ]
722
-
723
- # Phase 5: Wait for all tasks to complete (failures do not interrupt)
724
- results = await asyncio.gather(*tasks, return_exceptions=True)
725
-
726
- # Phase 6: Statistics exceptions and results
727
- failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
728
- successful = total - len(failed)
729
-
730
- if failed:
731
- failed_spiders = [spider_classes_to_run[i].__name__ for i in failed]
732
- logger.error(
733
- f"Crawler execution result: {successful}/{total} succeeded, {len(failed)}/{total} failed\n"
734
- f" - Failed crawlers: {failed_spiders}"
735
- )
736
-
737
- # Record detailed error information
738
- for i in failed:
739
- error = results[i]
740
- logger.error(f"Spider {spider_classes_to_run[i].__name__} error details: {error}")
741
- else:
742
- logger.info(f"All {total} crawlers completed successfully!")
743
-
744
- # Return statistics results
745
- return {
746
- 'total': total,
747
- 'successful': successful,
748
- 'failed': len(failed),
749
- 'success_rate': (successful / total) * 100 if total > 0 else 0,
750
- 'context_stats': self.context.get_stats()
751
- }
752
-
753
- finally:
754
- # Phase 7: Cleanup and shutdown
755
- await self.stop_monitoring()
756
- await self._cleanup_process()
757
-
758
- async def _cleanup_process(self):
759
- """Clean up process resources"""
760
- try:
761
- # Wait for all active crawlers to complete
762
- if self.crawlers:
763
- close_tasks = [crawler.close() for crawler in self.crawlers]
764
- await asyncio.gather(*close_tasks, return_exceptions=True)
765
- self.crawlers.clear()
766
-
767
- # Clean up active tasks
768
- if self._active_tasks:
769
- for task in list(self._active_tasks):
770
- if not task.done():
771
- task.cancel()
772
- await asyncio.gather(*self._active_tasks, return_exceptions=True)
773
- self._active_tasks.clear()
774
-
775
- logger.debug("Process resources cleanup completed")
776
-
777
- except Exception as e:
778
- logger.error(f"Error cleaning up process resources: {e}", exc_info=True)
779
-
780
- def get_process_stats(self) -> Dict[str, Any]:
781
- """Get process statistics information"""
782
- context_stats = self.context.get_stats()
783
-
784
- return {
785
- 'context': context_stats,
786
- 'performance': self._performance_stats.copy(),
787
- 'crawlers': {
788
- 'total_registered': len(self._spider_registry),
789
- 'active_crawlers': len(self.crawlers),
790
- 'max_concurrency': self.max_concurrency
791
- },
792
- 'registry': {
793
- 'spider_names': list(self._spider_registry.keys()),
794
- 'spider_classes': [cls.__name__ for cls in self._spider_registry.values()]
795
- }
796
- }
797
-
798
- def _resolve_spiders_to_run(
799
- self,
800
- spiders_input: Union[Type[Spider], str, List[Union[Type[Spider], str]]]
801
- ) -> List[Type[Spider]]:
802
- """
803
- Resolve input to spider class list
804
-
805
- Supports various input formats and validates uniqueness
806
- """
807
- inputs = self._normalize_inputs(spiders_input)
808
- seen_spider_names: Set[str] = set()
809
- spider_classes: List[Type[Spider]] = []
810
-
811
- for item in inputs:
812
- try:
813
- spider_cls = self._resolve_spider_class(item)
814
- spider_name = getattr(spider_cls, 'name', None)
815
-
816
- if not spider_name:
817
- raise ValueError(f"Spider class {spider_cls.__name__} missing 'name' attribute")
818
-
819
- if spider_name in seen_spider_names:
820
- raise ValueError(
821
- f"Duplicate spider name '{spider_name}' in this run.\n"
822
- f"Ensure each spider's name attribute is unique in this run."
823
- )
824
-
825
- seen_spider_names.add(spider_name)
826
- spider_classes.append(spider_cls)
827
-
828
- logger.debug(
829
- f"Spider resolved successfully: {item} -> {spider_cls.__name__} (name='{spider_name}')")
830
-
831
- except Exception as e:
832
- logger.error(f"Failed to resolve spider: {item} - {e}")
833
- raise
834
-
835
- return spider_classes
836
-
837
- @staticmethod
838
- def _normalize_inputs(spiders_input) -> List[Union[Type[Spider], str]]:
839
- """
840
- Normalize input to list
841
-
842
- Supports more input types and provides better error information
843
- """
844
- if isinstance(spiders_input, (type, str)):
845
- return [spiders_input]
846
- elif isinstance(spiders_input, (list, tuple, set)):
847
- spider_list = list(spiders_input)
848
- if not spider_list:
849
- raise ValueError("Spider list cannot be empty")
850
- return spider_list
851
- else:
852
- raise TypeError(
853
- f"Unsupported spiders parameter type: {type(spiders_input)}\n"
854
- f"Supported types: Spider class, name string, or their list/tuple/set"
855
- )
856
-
857
- def _resolve_spider_class(self, item: Union[Type[Spider], str]) -> Type[Spider]:
858
- """
859
- Resolve single input item to spider class
860
-
861
- Provides better error prompts and debugging information
862
- """
863
- if isinstance(item, type) and issubclass(item, Spider):
864
- # Direct Spider class
865
- return item
866
- elif isinstance(item, str):
867
- # String name, need to look up registry
868
- spider_cls = self._spider_registry.get(item)
869
- if not spider_cls:
870
- available_spiders = list(self._spider_registry.keys())
871
- raise ValueError(
872
- f"Spider named '{item}' not found.\n"
873
- f"Registered spiders: {available_spiders}\n"
874
- f"Please check if the spider name is correct, or ensure the spider has been properly imported and registered."
875
- )
876
- return spider_cls
877
- else:
878
- raise TypeError(
879
- f"Invalid type {type(item)}: {item}\n"
880
- f"Must be Spider class or string name.\n"
881
- f"Example: MySpider or 'my_spider'"
882
- )
883
-
884
- async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
885
- """
886
- Spider running function limited by semaphore
887
-
888
- Includes enhanced error handling and monitoring functionality
889
- """
890
- task = asyncio.current_task()
891
- crawler = None
892
-
893
- try:
894
- # Register task
895
- if task:
896
- self._active_tasks.add(task)
897
-
898
- # Acquire concurrency permit
899
- await self.semaphore.acquire()
900
-
901
- # start_msg = f"[{seq}/{total}] Initializing spider: {spider_cls.__name__}"
902
- # logger.info(start_msg)
903
-
904
- # Create and run crawler
905
- crawler = Crawler(spider_cls, self.settings, self.context)
906
- self.crawlers.add(crawler)
907
-
908
- # Record start time
909
- start_time = time.time()
910
-
911
- # Run crawler
912
- await crawler.crawl()
913
-
914
- # Calculate runtime
915
- duration = time.time() - start_time
916
-
917
- end_msg = (
918
- f"[{seq}/{total}] Crawler completed: {spider_cls.__name__}, "
919
- f"took: {duration:.2f} seconds"
920
- )
921
- logger.info(end_msg)
922
-
923
- # Record success statistics
924
- self._performance_stats['successful_requests'] += 1
925
-
926
- except Exception as e:
927
- # Record failure statistics
928
- self._performance_stats['failed_requests'] += 1
929
-
930
- error_msg = f"Spider {spider_cls.__name__} execution failed: {e}"
931
- logger.error(error_msg, exc_info=True)
932
-
933
- # Record error information to context
934
- if hasattr(self, 'context'):
935
- self.context.increment_failed(error_msg)
936
-
937
- raise
938
- finally:
939
- # Clean up resources
940
- try:
941
- if crawler and crawler in self.crawlers:
942
- self.crawlers.remove(crawler)
943
-
944
- if task and task in self._active_tasks:
945
- self._active_tasks.remove(task)
946
-
947
- self.semaphore.release()
948
-
949
- except Exception as cleanup_error:
950
- logger.warning(f"Error cleaning up resources: {cleanup_error}")
951
-
952
- def _shutdown(self, _signum, _frame):
953
- """
954
- Graceful shutdown signal handling
955
-
956
- Provides better shutdown experience and resource cleanup
957
- """
958
- signal_name = {signal.SIGINT: 'SIGINT', signal.SIGTERM: 'SIGTERM'}.get(_signum, str(_signum))
959
- logger.warning(f"Received shutdown signal {signal_name}, stopping all crawlers...")
960
-
961
- # Set shutdown event
962
- if hasattr(self, '_shutdown_event'):
963
- self._shutdown_event.set()
964
-
965
- # Stop all crawler engines
966
- for crawler in list(self.crawlers):
967
- if crawler.engine:
968
- crawler.engine.running = False
969
- crawler.engine.normal = False
970
- logger.debug(f"Crawler engine stopped: {getattr(crawler.spider, 'name', 'Unknown')}")
971
-
972
- # Create shutdown task
973
- asyncio.create_task(self._wait_for_shutdown())
974
-
975
- logger.info("Shutdown command sent, waiting for crawlers to complete current tasks...")
976
-
977
- async def _wait_for_shutdown(self):
978
- """
979
- Wait for all active tasks to complete
980
-
981
- Provides better shutdown time control and progress feedback
982
- """
983
- try:
984
- # Stop monitoring task
985
- await self.stop_monitoring()
986
-
987
- # Wait for active tasks to complete
988
- pending = [t for t in self._active_tasks if not t.done()]
989
-
990
- if pending:
991
- logger.info(
992
- f"Waiting for {len(pending)} active tasks to complete..."
993
- f"(Maximum wait time: 30 seconds)"
994
- )
995
-
996
- # Set timeout
997
- try:
998
- await asyncio.wait_for(
999
- asyncio.gather(*pending, return_exceptions=True),
1000
- timeout=30.0
1001
- )
1002
- except asyncio.TimeoutError:
1003
- logger.warning("Some tasks timed out, forcing cancellation...")
1004
-
1005
- # Force cancel timed out tasks
1006
- for task in pending:
1007
- if not task.done():
1008
- task.cancel()
1009
-
1010
- # Wait for cancellation to complete
1011
- await asyncio.gather(*pending, return_exceptions=True)
1012
-
1013
- # Final cleanup
1014
- await self._cleanup_process()
1015
-
1016
- # Output final statistics
1017
- final_stats = self.context.get_stats()
1018
- logger.info(
1019
- f"All crawlers gracefully shut down 👋\n"
1020
- f" - Total crawlers: {final_stats['total_crawlers']}\n"
1021
- f" - Successfully completed: {final_stats['completed_crawlers']}\n"
1022
- f" - Failed: {final_stats['failed_crawlers']}\n"
1023
- f" - Success rate: {final_stats['success_rate']:.1f}%\n"
1024
- f" - Total runtime: {final_stats['duration_seconds']} seconds"
1025
- )
1026
-
1027
- except Exception as e:
1028
- logger.error(f"Error during shutdown process: {e}", exc_info=True)
1029
-
1030
- @classmethod
1031
- def _get_default_settings(cls) -> SettingManager:
1032
- """
1033
- Load default configuration
1034
-
1035
- Provides better error handling and fallback strategy
1036
- """
1037
- try:
1038
- settings = get_settings()
1039
- logger.debug("Default configuration loaded successfully")
1040
- return settings
1041
- except Exception as e:
1042
- logger.warning(f"Unable to load default configuration: {e}, using empty configuration")
1043
- return SettingManager()
1044
-
1045
- def _log_startup_info(self):
1046
- """Print startup information, including run mode and key configuration checks"""
1047
- # Get run mode
1048
- run_mode = self.settings.get('RUN_MODE', 'standalone')
1049
-
1050
- # Get version number
1051
- version = self.settings.get('VERSION', '1.0.0')
1052
- if not version or version == 'None':
1053
- version = '1.0.0'
1054
-
1055
- # Print framework start info
1056
- logger.info(f"Crawlo Framework Started {version}")
1057
-
1058
- # Add mode info if available
1059
- mode_info = self.settings.get('_mode_info')
1060
- if mode_info:
1061
- logger.info(mode_info)
1062
- else:
1063
- # If _mode_info is not set, log the default message
1064
- logger.info("使用单机模式 - 简单快速,适合开发和中小规模爬取")
1065
-
1066
- # Get actual queue type
1067
- queue_type = self.settings.get('QUEUE_TYPE', 'memory')
1068
-
1069
- # Display information based on run mode and queue type combination
1070
- if run_mode == 'distributed':
1071
- logger.info("Run Mode: distributed")
1072
- logger.info("Distributed Mode - Multi-node collaboration supported")
1073
- # Show Redis configuration
1074
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
1075
- redis_port = self.settings.get('REDIS_PORT', 6379)
1076
- logger.info(f"Redis Address: {redis_host}:{redis_port}")
1077
- elif run_mode == 'standalone':
1078
- if queue_type == 'redis':
1079
- logger.info("Run Mode: standalone+redis")
1080
- # Show Redis configuration
1081
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
1082
- redis_port = self.settings.get('REDIS_PORT', 6379)
1083
- logger.info(f"Redis Address: {redis_host}:{redis_port}")
1084
- elif queue_type == 'auto':
1085
- logger.info("Run Mode: standalone+auto")
1086
- else: # memory
1087
- logger.info("Run Mode: standalone")
1088
- else:
1089
- logger.info(f"Run Mode: {run_mode}")
1090
-
1091
-
1092
- # === Utility functions ===
1093
-
1094
- def create_crawler_with_optimizations(
1095
- spider_cls: Type[Spider],
1096
- settings: Optional[SettingManager] = None,
1097
- **optimization_kwargs
1098
- ) -> Crawler:
1099
- """
1100
- Create an optimized crawler instance
1101
-
1102
- :param spider_cls: Spider class
1103
- :param settings: Settings manager
1104
- :param optimization_kwargs: Optimization parameters
1105
- :return: Crawler instance
1106
- """
1107
- if settings is None:
1108
- settings = SettingManager()
1109
-
1110
- # Apply optimization configuration
1111
- for key, value in optimization_kwargs.items():
1112
- settings.set(key, value)
1113
-
1114
- context = CrawlerContext()
1115
- return Crawler(spider_cls, settings, context)
1116
-
1117
-
1118
- def create_process_with_large_scale_config(
1119
- config_type: str = 'balanced',
1120
- concurrency: int = 16,
1121
- **kwargs
1122
- ) -> CrawlerProcess:
1123
- """
1124
- Create a process manager that supports large-scale optimization
1125
-
1126
- :param config_type: Configuration type ('conservative', 'balanced', 'aggressive', 'memory_optimized')
1127
- :param concurrency: Concurrency count
1128
- :param kwargs: Other parameters
1129
- :return: Process manager
1130
- """
1131
- try:
1132
- from crawlo.utils.large_scale_config import LargeScaleConfig
1133
-
1134
- # Get optimization configuration
1135
- config_methods = {
1136
- 'conservative': LargeScaleConfig.conservative_config,
1137
- 'balanced': LargeScaleConfig.balanced_config,
1138
- 'aggressive': LargeScaleConfig.aggressive_config,
1139
- 'memory_optimized': LargeScaleConfig.memory_optimized_config
1140
- }
1141
-
1142
- if config_type not in config_methods:
1143
- logger.warning(f"Unknown configuration type: {config_type}, using default configuration")
1144
- settings = SettingManager()
1145
- else:
1146
- config = config_methods[config_type](concurrency)
1147
- settings = SettingManager()
1148
- settings.update(config)
1149
-
1150
- return CrawlerProcess(
1151
- settings=settings,
1152
- max_concurrency=concurrency,
1153
- **kwargs
1154
- )
1155
-
1156
- except ImportError:
1157
- logger.warning("Large-scale configuration module does not exist, using default configuration")
1158
- return CrawlerProcess(max_concurrency=concurrency, **kwargs)
1159
-
1160
-
1161
- # === Exported interfaces ===
1162
-
1163
- __all__ = [
1164
- 'Crawler',
1165
- 'CrawlerProcess',
1166
- 'CrawlerContext',
1167
- 'create_crawler_with_optimizations',
1168
- 'create_process_with_large_scale_config'
1
+ #!/usr/bin/python
2
+ # -*- coding: UTF-8 -*-
3
+ """
4
+ Crawlo Crawler Module
5
+ ====================
6
+ Provides crawler process management and runtime core functionality.
7
+
8
+ Core Components:
9
+ - Crawler: Single crawler runtime instance, managing Spider and engine lifecycle
10
+ - CrawlerProcess: Crawler process manager, supporting multi-crawler concurrent scheduling and resource management
11
+
12
+ Features:
13
+ - Intelligent concurrency control and resource management
14
+ - Graceful shutdown and signal handling
15
+ - Statistics monitoring and performance tracking
16
+ - Automatic module discovery and registration
17
+ - Error recovery and retry mechanism
18
+ - Large-scale crawler optimization support
19
+
20
+ Example Usage:
21
+ # Single crawler run
22
+ crawler = Crawler(MySpider, settings)
23
+ await crawler.crawl()
24
+
25
+ # Multi-crawler concurrent management
26
+ process = CrawlerProcess()
27
+ await process.crawl([Spider1, Spider2])
28
+ """
29
+ from __future__ import annotations
30
+ import asyncio
31
+ import signal
32
+ import time
33
+ import threading
34
+ from typing import Type, Optional, Set, List, Union, Dict, Any
35
+ from .spider import Spider, get_global_spider_registry
36
+ from .core.engine import Engine
37
+ from .subscriber import Subscriber
38
+ from .extension import ExtensionManager
39
+ from crawlo.utils.log import get_logger
40
+ from .stats_collector import StatsCollector
41
+ from .event import spider_opened, spider_closed
42
+ from .settings.setting_manager import SettingManager
43
+ from crawlo.project import merge_settings, get_settings
44
+
45
+ logger = get_logger(__name__)
46
+
47
+
48
+ class CrawlerContext:
49
+ """
50
+ Crawler context manager
51
+ Provides shared state and resource management
52
+ """
53
+
54
+ def __init__(self):
55
+ self.start_time = time.time()
56
+ self.total_crawlers = 0
57
+ self.active_crawlers = 0
58
+ self.completed_crawlers = 0
59
+ self.failed_crawlers = 0
60
+ self.error_log = []
61
+ self._lock = threading.RLock()
62
+
63
+ def increment_total(self):
64
+ with self._lock:
65
+ self.total_crawlers += 1
66
+
67
+ def increment_active(self):
68
+ with self._lock:
69
+ self.active_crawlers += 1
70
+
71
+ def decrement_active(self):
72
+ with self._lock:
73
+ self.active_crawlers -= 1
74
+
75
+ def increment_completed(self):
76
+ with self._lock:
77
+ self.completed_crawlers += 1
78
+
79
+ def increment_failed(self, error: str):
80
+ with self._lock:
81
+ self.failed_crawlers += 1
82
+ self.error_log.append({
83
+ 'timestamp': time.time(),
84
+ 'error': error
85
+ })
86
+
87
+ def get_stats(self) -> Dict[str, Any]:
88
+ with self._lock:
89
+ duration = time.time() - self.start_time
90
+ return {
91
+ 'total_crawlers': self.total_crawlers,
92
+ 'active_crawlers': self.active_crawlers,
93
+ 'completed_crawlers': self.completed_crawlers,
94
+ 'failed_crawlers': self.failed_crawlers,
95
+ 'success_rate': (self.completed_crawlers / max(1, self.total_crawlers)) * 100,
96
+ 'duration_seconds': round(duration, 2),
97
+ 'error_count': len(self.error_log)
98
+ }
99
+
100
+
101
+ class Crawler:
102
+ """
103
+ Single crawler runtime instance, managing Spider and engine lifecycle
104
+
105
+ Provides functionality:
106
+ - Spider lifecycle management (initialization, running, closing)
107
+ - Engine component coordination management
108
+ - Configuration merging and validation
109
+ - Statistics data collection
110
+ - Extension management
111
+ - Exception handling and cleanup
112
+ """
113
+
114
+ def __init__(
115
+ self,
116
+ spider_cls: Type[Spider],
117
+ settings: SettingManager,
118
+ context: Optional[CrawlerContext] = None
119
+ ):
120
+ self.spider_cls = spider_cls
121
+ self.spider: Optional[Spider] = None
122
+ self.engine: Optional[Engine] = None
123
+ self.stats: Optional[StatsCollector] = None
124
+ self.subscriber: Optional[Subscriber] = None
125
+ self.extension: Optional[ExtensionManager] = None
126
+ self.settings: SettingManager = settings.copy()
127
+ self.context = context or CrawlerContext()
128
+
129
+ # State management
130
+ self._closed = False
131
+ self._close_lock = asyncio.Lock()
132
+ self._start_time = None
133
+ self._end_time = None
134
+
135
+ # Performance monitoring
136
+ self._performance_metrics = {
137
+ 'initialization_time': 0,
138
+ 'crawl_duration': 0,
139
+ 'memory_peak': 0,
140
+ 'request_count': 0,
141
+ 'error_count': 0
142
+ }
143
+
144
+ # Initialize components
145
+ self.subscriber = self._create_subscriber()
146
+ self.spider = self._create_spider()
147
+ self.engine = self._create_engine()
148
+ self.stats = self._create_stats()
149
+ # Note: Do not initialize extension manager here, let it initialize in the engine
150
+
151
+ # Validate crawler state
152
+ self._validate_crawler_state()
153
+
154
+ # Startup info printing was moved into the crawl method to avoid duplicate output from CrawlerProcess
155
+ # self._log_startup_info()
156
+
157
+ # Log the spider-start message here so it is emitted after the logging system has been configured
158
+ # logger.info(f"Starting running {self.spider.name}")
159
+
160
+     async def crawl(self):
+         """
+         Start the crawler core process
+
+         Includes the following stages:
+         1. Initialization stage: Create all components
+         2. Validation stage: Check configuration and state
+         3. Running stage: Start the crawler engine
+         4. Cleanup stage: Resource release
+         """
+         init_start = time.time()
+         self._start_time = init_start
+
+         try:
+             # Update context status
+             self.context.increment_active()
+
+             # Phase 1: Initialize components
+             # Adjust component initialization order to ensure log output order meets requirements
+             self.subscriber = self._create_subscriber()
+             self.spider = self._create_spider()
+             self.engine = self._create_engine()
+             self.stats = self._create_stats()
+             # Note: Do not initialize extension manager here, let it initialize in the engine
+
+             # Record initialization time
+             self._performance_metrics['initialization_time'] = time.time() - init_start
+
+             # Phase 2: Validate state
+             self._validate_crawler_state()
+
+             # Phase 3: Display runtime configuration summary
+             self._log_runtime_summary()
+
+             # Phase 4: Start crawler
+             crawl_start = time.time()
+             await self.engine.start_spider(self.spider)
+
+             # Record crawl time
+             self._performance_metrics['crawl_duration'] = time.time() - crawl_start
+             self._end_time = time.time()
+
+             # Update context status
+             self.context.increment_completed()
+
+             logger.info(f"Spider {self.spider.name} completed, took {self._get_total_duration():.2f} seconds")
+
+         except Exception as e:
+             self._performance_metrics['error_count'] += 1
+             self.context.increment_failed(str(e))
+             logger.error(f"Spider {getattr(self.spider, 'name', 'Unknown')} failed to run: {e}", exc_info=True)
+             raise
+         finally:
+             self.context.decrement_active()
+             # Ensure resource cleanup
+             await self._ensure_cleanup()
+
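A minimal sketch of driving a single Crawler through the stages above (illustrative only; MySpider is a hypothetical Spider subclass, and the settings object is assumed to be a SettingManager built from the project configuration):

    import asyncio
    from crawlo.crawler import Crawler

    async def main(settings):
        crawler = Crawler(MySpider, settings)        # context defaults to a fresh CrawlerContext
        await crawler.crawl()                        # initialize -> validate -> run -> cleanup
        print(crawler.get_performance_metrics())     # includes 'initialization_time' and 'crawl_duration'

    # asyncio.run(main(my_settings))                 # my_settings is hypothetical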
+     def _log_runtime_summary(self):
+         """Log runtime configuration summary"""
+         # Get spider name
+         spider_name = getattr(self.spider, 'name', 'Unknown')
+
+         # Ensure spider name is a string and strip leading/trailing whitespace
+         if spider_name:
+             spider_name = str(spider_name).strip()
+         else:
+             spider_name = 'Unknown'
+
+         logger.info(f"Starting running {spider_name}")
+
+     def _validate_crawler_state(self):
+         """
+         Validate crawler state and configuration
+         Ensure all necessary components are properly initialized
+         """
+         if not self.spider:
+             raise RuntimeError("Spider instance not initialized")
+         if not self.engine:
+             raise RuntimeError("Engine not initialized")
+         if not self.stats:
+             raise RuntimeError("Stats collector not initialized")
+         if not self.subscriber:
+             raise RuntimeError("Event subscriber not initialized")
+
+         # Check key configuration
+         if not self.spider.name:
+             raise ValueError("Spider name cannot be empty")
+
+         logger.debug(f"Spider {self.spider.name} state validation passed")
+
+     def _get_total_duration(self) -> float:
+         """Get total runtime"""
+         if self._start_time and self._end_time:
+             return self._end_time - self._start_time
+         return 0.0
+
+     def _log_startup_info(self):
+         """Print startup information, including run mode and key configuration checks"""
+         # Get run mode
+         run_mode = self.settings.get('RUN_MODE', 'standalone')
+
+         # Get version number
+         version = self.settings.get('VERSION', '1.0.0')
+         if not version or version == 'None':
+             version = '1.0.0'
+
+         # Print framework start info
+         logger.info(f"Crawlo Framework Started {version}")
+
+         # Add mode info if available
+         mode_info = self.settings.get('_mode_info')
+         if mode_info:
+             logger.info(mode_info)
+         else:
+             # No _mode_info available; log the default message
+             logger.info("Standalone mode - simple and fast, suitable for development and small-to-medium crawls")
+
+         # Get actual queue type
+         queue_type = self.settings.get('QUEUE_TYPE', 'memory')
+
+         # Display information based on run mode and queue type combination
+         if run_mode == 'distributed':
+             logger.info("Run Mode: distributed")
+             logger.info("Distributed Mode - Multi-node collaboration supported")
+             # Show Redis configuration
+             redis_host = self.settings.get('REDIS_HOST', 'localhost')
+             redis_port = self.settings.get('REDIS_PORT', 6379)
+             logger.info(f"Redis Address: {redis_host}:{redis_port}")
+         elif run_mode == 'standalone':
+             if queue_type == 'redis':
+                 logger.info("Run Mode: standalone+redis")
+                 # Show Redis configuration
+                 redis_host = self.settings.get('REDIS_HOST', 'localhost')
+                 redis_port = self.settings.get('REDIS_PORT', 6379)
+                 logger.info(f"Redis Address: {redis_host}:{redis_port}")
+             elif queue_type == 'auto':
+                 logger.info("Run Mode: standalone+auto")
+             else:  # memory
+                 logger.info("Run Mode: standalone")
+         else:
+             logger.info(f"Run Mode: {run_mode}")
+
+     async def _ensure_cleanup(self):
+         """Ensure resource cleanup"""
+         try:
+             if not self._closed:
+                 await self.close()
+         except Exception as e:
+             logger.warning(f"Error cleaning up resources: {e}")
+
+     def get_performance_metrics(self) -> Dict[str, Any]:
+         """Get performance metrics"""
+         metrics = self._performance_metrics.copy()
+         metrics['total_duration'] = self._get_total_duration()
+         if self.stats:
+             # Add statistics data
+             stats_data = getattr(self.stats, 'get_stats', lambda: {})()
+             metrics.update(stats_data)
+         return metrics
+
+     @staticmethod
+     def _create_subscriber() -> Subscriber:
+         """Create event subscriber"""
+         return Subscriber()
+
+     def _create_spider(self) -> Spider:
+         """
+         Create and validate spider instance (enhanced version)
+
+         Performs the following validations:
+         - Spider name must exist
+         - start_requests method must be callable
+         - start_urls cannot be a string
+         - parse method is recommended to exist
+         """
+         spider = self.spider_cls.create_instance(self)
+
+         # Required attribute check
+         if not getattr(spider, 'name', None):
+             raise AttributeError(
+                 f"Spider class '{self.spider_cls.__name__}' must define 'name' attribute.\n"
+                 f"Example: name = 'my_spider'"
+             )
+
+         if not callable(getattr(spider, 'start_requests', None)):
+             raise AttributeError(
+                 f"Spider '{spider.name}' must implement a callable 'start_requests' method.\n"
+                 f"Example: def start_requests(self): yield Request(url='...')"
+             )
+
+         # start_urls type check
+         start_urls = getattr(spider, 'start_urls', [])
+         if isinstance(start_urls, str):
+             raise TypeError(
+                 f"Spider '{spider.name}' 'start_urls' must be a list or tuple, not a string.\n"
+                 f"Correct: start_urls = ['http://example.com']\n"
+                 f"Incorrect: start_urls = 'http://example.com'"
+             )
+
+         # parse method check (warning instead of error)
+         if not callable(getattr(spider, 'parse', None)):
+             logger.warning(
+                 f"Spider '{spider.name}' does not define a 'parse' method.\n"
+                 f"Ensure all Requests specify a callback function, otherwise responses will be ignored."
+             )
+
+         # Set spider configuration
+         self._set_spider(spider)
+
+         logger.debug(f"Spider '{spider.name}' initialized successfully")
+         return spider
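For reference, a spider that passes every check above would look roughly like this (a sketch; the import paths for Spider and Request are assumptions based on this package's module layout):

    from crawlo import Spider
    from crawlo.network.request import Request

    class MySpider(Spider):
        name = 'my_spider'                       # required, non-empty
        start_urls = ['http://example.com']      # a list or tuple, never a bare string

        def start_requests(self):                # must be callable
            for url in self.start_urls:
                yield Request(url=url, callback=self.parse)

        def parse(self, response):               # recommended default callback
            yield {'url': response.url}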
+
+     def _create_engine(self) -> Engine:
+         """Create and initialize engine"""
+         engine = Engine(self)
+         engine.engine_start()
+         logger.debug(f"Engine initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
+         return engine
+
+     def _create_stats(self) -> StatsCollector:
+         """Create stats collector"""
+         stats = StatsCollector(self)
+         logger.debug(
+             f"Stats collector initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
+         return stats
+
+     def _create_extension(self) -> ExtensionManager:
+         """Create extension manager"""
+         # Extension manager creation is deferred: it is initialized only when needed
+         extension = ExtensionManager.create_instance(self)
+         logger.debug(
+             f"Extension manager initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
+         return extension
+
+     def _set_spider(self, spider: Spider):
+         """
+         Set spider configuration and event subscription
+         Bind spider lifecycle events with subscriber
+         """
+         # Subscribe to spider lifecycle events
+         self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
+         self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
+
+         # Merge spider custom configuration
+         merge_settings(spider, self.settings)
+
+         logger.debug(f"Spider '{spider.name}' configuration merged successfully")
+
+     async def close(self, reason='finished') -> None:
+         """
+         Close crawler and clean up resources (enhanced version)
+
+         Ensure closing only once and handle all cleanup operations
+         """
+         async with self._close_lock:
+             if self._closed:
+                 return
+
+             self._closed = True
+             self._end_time = time.time()
+
+             try:
+                 # Notify spider close event
+                 if self.subscriber:
+                     await self.subscriber.notify(spider_closed)
+
+                 # Statistics data collection
+                 if self.stats and self.spider:
+                     self.stats.close_spider(spider=self.spider, reason=reason)
+                     # Record statistics data
+                     try:
+                         from crawlo.commands.stats import record_stats
+                         record_stats(self)
+                     except ImportError:
+                         logger.debug("Statistics recording module does not exist, skipping statistics recording")
+
+                 logger.info(
+                     f"Spider '{getattr(self.spider, 'name', 'Unknown')}' closed, "
+                     f"reason: {reason}, took: {self._get_total_duration():.2f} seconds"
+                 )
+
+             except Exception as e:
+                 logger.error(f"Error closing crawler: {e}", exc_info=True)
+             finally:
+                 # Ensure resource cleanup
+                 await self._cleanup_resources()
+
+     async def _cleanup_resources(self):
+         """Clean up all resources"""
+         cleanup_tasks = []
+
+         # Engine cleanup
+         if self.engine:
+             try:
+                 cleanup_tasks.append(self.engine.close())
+             except AttributeError:
+                 pass  # Engine has no close method
+
+         # Extension cleanup
+         if self.extension:
+             try:
+                 cleanup_tasks.append(self.extension.close())
+             except AttributeError:
+                 pass
+
+         # Stats collector cleanup
+         if self.stats:
+             try:
+                 cleanup_tasks.append(self.stats.close())
+             except AttributeError:
+                 pass
+
+         # Concurrently execute cleanup tasks
+         if cleanup_tasks:
+             await asyncio.gather(*cleanup_tasks, return_exceptions=True)
+
+         logger.debug("Resource cleanup completed")
+
+
+ class CrawlerProcess:
+     """
+     Crawler process manager
+
+     Supported features:
+     - Multi-crawler concurrent scheduling and resource management
+     - Automatic module discovery and spider registration
+     - Intelligent concurrency control and load balancing
+     - Graceful shutdown and signal handling
+     - Real-time status monitoring and statistics
+     - Error recovery and retry mechanism
+     - Large-scale crawler optimization support
+
+     Usage example:
+         # Basic usage
+         process = CrawlerProcess()
+         await process.crawl(MySpider)
+
+         # Multi-crawler concurrency
+         await process.crawl([Spider1, Spider2, 'spider_name'])
+
+         # Custom concurrency
+         process = CrawlerProcess(max_concurrency=8)
+     """
+
+     def __init__(
+         self,
+         settings: Optional[SettingManager] = None,
+         max_concurrency: Optional[int] = None,
+         spider_modules: Optional[List[str]] = None,
+         enable_monitoring: bool = True
+     ):
+         # Basic configuration
+         self.settings: SettingManager = settings or self._get_default_settings()
+         self.crawlers: Set[Crawler] = set()
+         self._active_tasks: Set[asyncio.Task] = set()
+
+         # Context manager
+         self.context = CrawlerContext()
+
+         # Concurrency control configuration
+         self.max_concurrency: int = (
+             max_concurrency
+             or self.settings.get('MAX_RUNNING_SPIDERS')
+             or self.settings.get('CONCURRENCY', 3)
+         )
+         self.semaphore = asyncio.Semaphore(self.max_concurrency)
+
+         # Monitoring configuration
+         self.enable_monitoring = enable_monitoring
+         self._monitoring_task = None
+         self._shutdown_event = asyncio.Event()
+
+         # Automatically discover and import spider modules
+         if spider_modules:
+             self.auto_discover(spider_modules)
+
+         # Use a snapshot of the global registry (so later imports do not affect this run)
+         self._spider_registry: Dict[str, Type[Spider]] = get_global_spider_registry()
+
+         # Performance monitoring
+         self._performance_stats = {
+             'total_requests': 0,
+             'successful_requests': 0,
+             'failed_requests': 0,
+             'memory_usage_mb': 0,
+             'cpu_usage_percent': 0
+         }
+
+         # Register signal handlers
+         signal.signal(signal.SIGINT, self._shutdown)
+         signal.signal(signal.SIGTERM, self._shutdown)
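The max_concurrency fallback chain above resolves, in order: the explicit constructor argument, then the MAX_RUNNING_SPIDERS setting, then CONCURRENCY, then the default of 3. A sketch (setting names are taken from the code above; everything else is hypothetical):

    process = CrawlerProcess(max_concurrency=8)    # explicit argument wins

    # or let the settings decide:
    # settings.set('MAX_RUNNING_SPIDERS', 4)
    # process = CrawlerProcess(settings=settings)  # resolves to 4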
+
+     def _log_startup_info(self):
+         """Print startup information, including run mode and key configuration checks"""
+         # Get run mode
+         run_mode = self.settings.get('RUN_MODE', 'standalone')
+
+         # Get version number
+         version = self.settings.get('VERSION', '1.0.0')
+         if not version or version == 'None':
+             version = '1.0.0'
+
+         # Print framework start info
+         logger.info(f"Crawlo Framework Started {version}")
+
+         # Add mode info if available
+         mode_info = self.settings.get('_mode_info')
+         if mode_info:
+             logger.info(mode_info)
+         else:
+             # No _mode_info available; log the default message
+             logger.info("Standalone mode - simple and fast, suitable for development and small-to-medium crawls")
+
+         # Get actual queue type
+         queue_type = self.settings.get('QUEUE_TYPE', 'memory')
+
+         # Display information based on run mode and queue type combination
+         if run_mode == 'distributed':
+             logger.info("Run Mode: distributed")
+             logger.info("Distributed Mode - Multi-node collaboration supported")
+             # Show Redis configuration
+             redis_host = self.settings.get('REDIS_HOST', 'localhost')
+             redis_port = self.settings.get('REDIS_PORT', 6379)
+             logger.info(f"Redis Address: {redis_host}:{redis_port}")
+         elif run_mode == 'standalone':
+             if queue_type == 'redis':
+                 logger.info("Run Mode: standalone+redis")
+                 # Show Redis configuration
+                 redis_host = self.settings.get('REDIS_HOST', 'localhost')
+                 redis_port = self.settings.get('REDIS_PORT', 6379)
+                 logger.info(f"Redis Address: {redis_host}:{redis_port}")
+             elif queue_type == 'auto':
+                 logger.info("Run Mode: standalone+auto")
+             else:  # memory
+                 logger.info("Run Mode: standalone")
+         else:
+             logger.info(f"Run Mode: {run_mode}")
+
+         logger.debug(
+             f"CrawlerProcess initialized successfully\n"
+             f" - Max concurrent crawlers: {self.max_concurrency}\n"
+             f" - Registered crawlers: {len(self._spider_registry)}\n"
+             f" - Monitoring enabled: {self.enable_monitoring}"
+         )
+
+     async def start_monitoring(self):
+         """Start monitoring task"""
+         if not self.enable_monitoring:
+             return
+
+         self._monitoring_task = asyncio.create_task(self._monitor_loop())
+         logger.debug("Monitoring task started")
+
+     async def stop_monitoring(self):
+         """Stop monitoring task"""
+         if self._monitoring_task and not self._monitoring_task.done():
+             self._monitoring_task.cancel()
+             try:
+                 await self._monitoring_task
+             except asyncio.CancelledError:
+                 pass
+             logger.debug("Monitoring task stopped")
+
+     async def _monitor_loop(self):
+         """Monitoring loop, periodically collect and report status"""
+         try:
+             while not self._shutdown_event.is_set():
+                 await self._collect_performance_stats()
+
+                 # Output status every 30 seconds
+                 stats = self.context.get_stats()
+                 if stats['active_crawlers'] > 0:
+                     logger.debug(
+                         f"Crawler status: Active {stats['active_crawlers']}, "
+                         f"Completed {stats['completed_crawlers']}, "
+                         f"Failed {stats['failed_crawlers']}, "
+                         f"Success rate {stats['success_rate']:.1f}%"
+                     )
+
+                 await asyncio.sleep(30)  # 30 second interval
+
+         except asyncio.CancelledError:
+             logger.debug("Monitoring loop cancelled")
+         except Exception as e:
+             logger.error(f"Monitoring loop error: {e}", exc_info=True)
+
+     async def _collect_performance_stats(self):
+         """Collect performance statistics data"""
+         try:
+             import psutil
+             import os
+
+             process = psutil.Process(os.getpid())
+             memory_info = process.memory_info()
+
+             self._performance_stats.update({
+                 'memory_usage_mb': round(memory_info.rss / 1024 / 1024, 2),
+                 'cpu_usage_percent': round(process.cpu_percent(), 2)
+             })
+
+         except ImportError:
+             # Skip performance monitoring when psutil is not available
+             pass
+         except Exception as e:
+             logger.debug(f"Failed to collect performance statistics: {e}")
+
+     @staticmethod
+     def auto_discover(modules: List[str]):
+         """
+         Automatically import modules, triggering Spider class definition and registration (enhanced version)
+
+         Supports recursive scanning and error recovery
+         """
+         import importlib
+         import pkgutil
+
+         discovered_count = 0
+         error_count = 0
+
+         for module_name in modules:
+             try:
+                 module = importlib.import_module(module_name)
+
+                 if hasattr(module, '__path__'):
+                     # Package module: scan recursively
+                     for _, name, _ in pkgutil.walk_packages(module.__path__, module.__name__ + "."):
+                         try:
+                             importlib.import_module(name)
+                             discovered_count += 1
+                         except Exception as sub_e:
+                             error_count += 1
+                             logger.warning(f"Failed to import submodule {name}: {sub_e}")
+                 else:
+                     # Single module
+                     importlib.import_module(module_name)
+                     discovered_count += 1
+
+                 logger.debug(f"Module scanned: {module_name}")
+
+             except Exception as e:
+                 error_count += 1
+                 logger.error(f"Failed to scan module {module_name}: {e}", exc_info=True)
+
+         logger.debug(
+             f"Spider registration completed: {discovered_count} succeeded, {error_count} failed"
+         )
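Passing spider_modules to the constructor runs this discovery before the registry snapshot is taken: a package is scanned recursively via pkgutil.walk_packages, while a plain module is imported once. A sketch (the module path is hypothetical):

    process = CrawlerProcess(spider_modules=['myproject.spiders'])
    print(process.get_spider_names())    # spiders registered during the import scan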
+
+     # === Public read-only interface: avoid direct access to _spider_registry ===
+
+     def get_spider_names(self) -> List[str]:
+         """Get all registered spider names"""
+         return list(self._spider_registry.keys())
+
+     def get_spider_class(self, name: str) -> Optional[Type[Spider]]:
+         """Get spider class by name"""
+         return self._spider_registry.get(name)
+
+     def is_spider_registered(self, name: str) -> bool:
+         """Check whether a name is registered"""
+         return name in self._spider_registry
+
+     async def crawl(self, spiders: Union[Type[Spider], str, List[Union[Type[Spider], str]]]):
+         """
+         Start one or more crawlers
+
+         Enhanced features:
+         - Intelligent concurrency control
+         - Real-time monitoring and statistics
+         - Error recovery and retry
+         - Graceful shutdown handling
+         """
+         # Phase 1: Preprocessing and validation
+         spider_classes_to_run = self._resolve_spiders_to_run(spiders)
+         total = len(spider_classes_to_run)
+
+         if total == 0:
+             raise ValueError("At least one spider class or name must be provided")
+
+         # Log startup information here, after the logging system has been configured,
+         # so the framework startup info is correctly written to the log file
+         self._log_startup_info()
+
+         # Phase 2: Initialize context and monitoring
+         for _ in range(total):
+             self.context.increment_total()
+
+         # Start monitoring task
+         await self.start_monitoring()
+
+         try:
+             # Phase 3: Sort spiders for a deterministic startup order
+             spider_classes_to_run.sort(key=lambda cls: cls.__name__.lower())
+
+             logger.debug(
+                 f"Starting {total} crawlers\n"
+                 f" - Max concurrency: {self.max_concurrency}\n"
+                 f" - Spider list: {[cls.__name__ for cls in spider_classes_to_run]}"
+             )
+
+             # Phase 4: Launch all crawler tasks
+             tasks = [
+                 asyncio.create_task(
+                     self._run_spider_with_limit(spider_cls, index + 1, total),
+                     name=f"spider-{spider_cls.__name__}-{index + 1}"
+                 )
+                 for index, spider_cls in enumerate(spider_classes_to_run)
+             ]
+
+             # Phase 5: Wait for all tasks to complete (failures do not interrupt)
+             results = await asyncio.gather(*tasks, return_exceptions=True)
+
+             # Phase 6: Tally exceptions and results
+             failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
+             successful = total - len(failed)
+
+             if failed:
+                 failed_spiders = [spider_classes_to_run[i].__name__ for i in failed]
+                 logger.error(
+                     f"Crawler execution result: {successful}/{total} succeeded, {len(failed)}/{total} failed\n"
+                     f" - Failed crawlers: {failed_spiders}"
+                 )
+
+                 # Record detailed error information
+                 for i in failed:
+                     error = results[i]
+                     logger.error(f"Spider {spider_classes_to_run[i].__name__} error details: {error}")
+             else:
+                 logger.info(f"All {total} crawlers completed successfully!")
+
+             # Return statistics results
+             return {
+                 'total': total,
+                 'successful': successful,
+                 'failed': len(failed),
+                 'success_rate': (successful / total) * 100 if total > 0 else 0,
+                 'context_stats': self.context.get_stats()
+             }
+
+         finally:
+             # Phase 7: Cleanup and shutdown
+             await self.stop_monitoring()
+             await self._cleanup_process()
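The returned dictionary makes batch runs easy to check programmatically. A sketch (spider names are hypothetical):

    import asyncio

    async def main():
        process = CrawlerProcess()
        result = await process.crawl(['spider_a', 'spider_b'])
        if result['failed']:
            print(f"{result['failed']} of {result['total']} crawlers failed "
                  f"(success rate {result['success_rate']:.1f}%)")

    asyncio.run(main())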
+
+     async def _cleanup_process(self):
+         """Clean up process resources"""
+         try:
+             # Wait for all active crawlers to complete
+             if self.crawlers:
+                 close_tasks = [crawler.close() for crawler in self.crawlers]
+                 await asyncio.gather(*close_tasks, return_exceptions=True)
+                 self.crawlers.clear()
+
+             # Clean up active tasks
+             if self._active_tasks:
+                 for task in list(self._active_tasks):
+                     if not task.done():
+                         task.cancel()
+                 await asyncio.gather(*self._active_tasks, return_exceptions=True)
+                 self._active_tasks.clear()
+
+             logger.debug("Process resources cleanup completed")
+
+         except Exception as e:
+             logger.error(f"Error cleaning up process resources: {e}", exc_info=True)
+
+     def get_process_stats(self) -> Dict[str, Any]:
+         """Get process statistics information"""
+         context_stats = self.context.get_stats()
+
+         return {
+             'context': context_stats,
+             'performance': self._performance_stats.copy(),
+             'crawlers': {
+                 'total_registered': len(self._spider_registry),
+                 'active_crawlers': len(self.crawlers),
+                 'max_concurrency': self.max_concurrency
+             },
+             'registry': {
+                 'spider_names': list(self._spider_registry.keys()),
+                 'spider_classes': [cls.__name__ for cls in self._spider_registry.values()]
+             }
+         }
+
+     def _resolve_spiders_to_run(
+         self,
+         spiders_input: Union[Type[Spider], str, List[Union[Type[Spider], str]]]
+     ) -> List[Type[Spider]]:
+         """
+         Resolve input into a list of spider classes
+
+         Supports various input formats and validates name uniqueness
+         """
+         inputs = self._normalize_inputs(spiders_input)
+         seen_spider_names: Set[str] = set()
+         spider_classes: List[Type[Spider]] = []
+
+         for item in inputs:
+             try:
+                 spider_cls = self._resolve_spider_class(item)
+                 spider_name = getattr(spider_cls, 'name', None)
+
+                 if not spider_name:
+                     raise ValueError(f"Spider class {spider_cls.__name__} missing 'name' attribute")
+
+                 if spider_name in seen_spider_names:
+                     raise ValueError(
+                         f"Duplicate spider name '{spider_name}' in this run.\n"
+                         f"Ensure each spider's name attribute is unique."
+                     )
+
+                 seen_spider_names.add(spider_name)
+                 spider_classes.append(spider_cls)
+
+                 logger.debug(
+                     f"Spider resolved successfully: {item} -> {spider_cls.__name__} (name='{spider_name}')")
+
+             except Exception as e:
+                 logger.error(f"Failed to resolve spider: {item} - {e}")
+                 raise
+
+         return spider_classes
+
+     @staticmethod
+     def _normalize_inputs(spiders_input) -> List[Union[Type[Spider], str]]:
+         """
+         Normalize input into a list
+
+         Supports multiple input types and provides better error messages
+         """
+         if isinstance(spiders_input, (type, str)):
+             return [spiders_input]
+         elif isinstance(spiders_input, (list, tuple, set)):
+             spider_list = list(spiders_input)
+             if not spider_list:
+                 raise ValueError("Spider list cannot be empty")
+             return spider_list
+         else:
+             raise TypeError(
+                 f"Unsupported spiders parameter type: {type(spiders_input)}\n"
+                 f"Supported types: Spider class, name string, or a list/tuple/set of these"
+             )
+
+     def _resolve_spider_class(self, item: Union[Type[Spider], str]) -> Type[Spider]:
+         """
+         Resolve a single input item to a spider class
+
+         Provides helpful error messages and debugging information
+         """
+         if isinstance(item, type) and issubclass(item, Spider):
+             # Direct Spider class
+             return item
+         elif isinstance(item, str):
+             # String name; look it up in the registry
+             spider_cls = self._spider_registry.get(item)
+             if not spider_cls:
+                 available_spiders = list(self._spider_registry.keys())
+                 raise ValueError(
+                     f"Spider named '{item}' not found.\n"
+                     f"Registered spiders: {available_spiders}\n"
+                     f"Please check that the spider name is correct, and ensure the spider has been properly imported and registered."
+                 )
+             return spider_cls
+         else:
+             raise TypeError(
+                 f"Invalid type {type(item)}: {item}\n"
+                 f"Must be a Spider class or string name.\n"
+                 f"Example: MySpider or 'my_spider'"
+             )
+
+     async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
+         """
+         Spider running function limited by semaphore
+
+         Includes enhanced error handling and monitoring functionality
+         """
+         task = asyncio.current_task()
+         crawler = None
+
+         try:
+             # Register task
+             if task:
+                 self._active_tasks.add(task)
+
+             # Acquire concurrency permit
+             await self.semaphore.acquire()
+
+             # start_msg = f"[{seq}/{total}] Initializing spider: {spider_cls.__name__}"
+             # logger.info(start_msg)
+
+             # Create and run crawler
+             crawler = Crawler(spider_cls, self.settings, self.context)
+             self.crawlers.add(crawler)
+
+             # Record start time
+             start_time = time.time()
+
+             # Run crawler
+             await crawler.crawl()
+
+             # Calculate runtime
+             duration = time.time() - start_time
+
+             end_msg = (
+                 f"[{seq}/{total}] Crawler completed: {spider_cls.__name__}, "
+                 f"took: {duration:.2f} seconds"
+             )
+             logger.info(end_msg)
+
+             # Record success statistics
+             self._performance_stats['successful_requests'] += 1
+
+         except Exception as e:
+             # Record failure statistics
+             self._performance_stats['failed_requests'] += 1
+
+             error_msg = f"Spider {spider_cls.__name__} execution failed: {e}"
+             logger.error(error_msg, exc_info=True)
+
+             # Record error information to context
+             if hasattr(self, 'context'):
+                 self.context.increment_failed(error_msg)
+
+             raise
+         finally:
+             # Clean up resources
+             try:
+                 if crawler and crawler in self.crawlers:
+                     self.crawlers.remove(crawler)
+
+                 if task and task in self._active_tasks:
+                     self._active_tasks.remove(task)
+
+                 self.semaphore.release()
+
+             except Exception as cleanup_error:
+                 logger.warning(f"Error cleaning up resources: {cleanup_error}")
+
+     def _shutdown(self, _signum, _frame):
+         """
+         Graceful shutdown signal handling
+
+         Provides a better shutdown experience and resource cleanup
+         """
+         signal_name = {signal.SIGINT: 'SIGINT', signal.SIGTERM: 'SIGTERM'}.get(_signum, str(_signum))
+         logger.warning(f"Received shutdown signal {signal_name}, stopping all crawlers...")
+
+         # Set shutdown event
+         if hasattr(self, '_shutdown_event'):
+             self._shutdown_event.set()
+
+         # Stop all crawler engines
+         for crawler in list(self.crawlers):
+             if crawler.engine:
+                 crawler.engine.running = False
+                 crawler.engine.normal = False
+                 logger.debug(f"Crawler engine stopped: {getattr(crawler.spider, 'name', 'Unknown')}")
+
+         # Create shutdown task
+         asyncio.create_task(self._wait_for_shutdown())
+
+         logger.info("Shutdown command sent, waiting for crawlers to complete current tasks...")
+
+     async def _wait_for_shutdown(self):
+         """
+         Wait for all active tasks to complete
+
+         Provides better shutdown time control and progress feedback
+         """
+         try:
+             # Stop monitoring task
+             await self.stop_monitoring()
+
+             # Wait for active tasks to complete
+             pending = [t for t in self._active_tasks if not t.done()]
+
+             if pending:
+                 logger.info(
+                     f"Waiting for {len(pending)} active tasks to complete..."
+                     f"(Maximum wait time: 30 seconds)"
+                 )
+
+                 # Set timeout
+                 try:
+                     await asyncio.wait_for(
+                         asyncio.gather(*pending, return_exceptions=True),
+                         timeout=30.0
+                     )
+                 except asyncio.TimeoutError:
+                     logger.warning("Some tasks timed out, forcing cancellation...")
+
+                     # Force cancel timed-out tasks
+                     for task in pending:
+                         if not task.done():
+                             task.cancel()
+
+                     # Wait for cancellation to complete
+                     await asyncio.gather(*pending, return_exceptions=True)
+
+             # Final cleanup
+             await self._cleanup_process()
+
+             # Output final statistics
+             final_stats = self.context.get_stats()
+             logger.info(
+                 f"All crawlers gracefully shut down 👋\n"
+                 f" - Total crawlers: {final_stats['total_crawlers']}\n"
+                 f" - Successfully completed: {final_stats['completed_crawlers']}\n"
+                 f" - Failed: {final_stats['failed_crawlers']}\n"
+                 f" - Success rate: {final_stats['success_rate']:.1f}%\n"
+                 f" - Total runtime: {final_stats['duration_seconds']} seconds"
+             )
+
+         except Exception as e:
+             logger.error(f"Error during shutdown process: {e}", exc_info=True)
+
+     @classmethod
+     def _get_default_settings(cls) -> SettingManager:
+         """
+         Load default configuration
+
+         Provides better error handling and a fallback strategy
+         """
+         try:
+             settings = get_settings()
+             logger.debug("Default configuration loaded successfully")
+             return settings
+         except Exception as e:
+             logger.warning(f"Unable to load default configuration: {e}, using empty configuration")
+             return SettingManager()
+
+
+ # === Utility functions ===
+
+ def create_crawler_with_optimizations(
+     spider_cls: Type[Spider],
+     settings: Optional[SettingManager] = None,
+     **optimization_kwargs
+ ) -> Crawler:
+     """
+     Create an optimized crawler instance
+
+     :param spider_cls: Spider class
+     :param settings: Settings manager
+     :param optimization_kwargs: Optimization parameters
+     :return: Crawler instance
+     """
+     if settings is None:
+         settings = SettingManager()
+
+     # Apply optimization configuration
+     for key, value in optimization_kwargs.items():
+         settings.set(key, value)
+
+     context = CrawlerContext()
+     return Crawler(spider_cls, settings, context)
+
+
+ def create_process_with_large_scale_config(
+     config_type: str = 'balanced',
+     concurrency: int = 16,
+     **kwargs
+ ) -> CrawlerProcess:
+     """
+     Create a process manager that supports large-scale optimization
+
+     :param config_type: Configuration type ('conservative', 'balanced', 'aggressive', 'memory_optimized')
+     :param concurrency: Concurrency count
+     :param kwargs: Other parameters
+     :return: Process manager
+     """
+     try:
+         from crawlo.utils.large_scale_config import LargeScaleConfig
+
+         # Get optimization configuration
+         config_methods = {
+             'conservative': LargeScaleConfig.conservative_config,
+             'balanced': LargeScaleConfig.balanced_config,
+             'aggressive': LargeScaleConfig.aggressive_config,
+             'memory_optimized': LargeScaleConfig.memory_optimized_config
+         }
+
+         if config_type not in config_methods:
+             logger.warning(f"Unknown configuration type: {config_type}, using default configuration")
+             settings = SettingManager()
+         else:
+             config = config_methods[config_type](concurrency)
+             settings = SettingManager()
+             settings.update(config)
+
+         return CrawlerProcess(
+             settings=settings,
+             max_concurrency=concurrency,
+             **kwargs
+         )
+
+     except ImportError:
+         logger.warning("Large-scale configuration module does not exist, using default configuration")
+         return CrawlerProcess(max_concurrency=concurrency, **kwargs)
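Both helpers are thin conveniences over the classes above. A usage sketch (MySpider is hypothetical, and DOWNLOAD_DELAY is one plausible settings key to override; any keyword argument is applied via settings.set):

    crawler = create_crawler_with_optimizations(MySpider, DOWNLOAD_DELAY=0.5)
    process = create_process_with_large_scale_config('aggressive', concurrency=32)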
+
+
+ # === Exported interfaces ===
+
+ __all__ = [
+     'Crawler',
+     'CrawlerProcess',
+     'CrawlerContext',
+     'create_crawler_with_optimizations',
+     'create_process_with_large_scale_config'
  ]