crawlo 1.3.3__py3-none-any.whl → 1.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (279)
  1. crawlo/__init__.py +87 -63
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +75 -75
  4. crawlo/commands/__init__.py +14 -14
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/help.py +138 -138
  8. crawlo/commands/list.py +155 -155
  9. crawlo/commands/run.py +341 -323
  10. crawlo/commands/startproject.py +436 -436
  11. crawlo/commands/stats.py +187 -187
  12. crawlo/commands/utils.py +196 -196
  13. crawlo/config.py +312 -312
  14. crawlo/config_validator.py +277 -277
  15. crawlo/core/__init__.py +46 -2
  16. crawlo/core/engine.py +439 -365
  17. crawlo/core/processor.py +40 -40
  18. crawlo/core/scheduler.py +257 -256
  19. crawlo/crawler.py +639 -1167
  20. crawlo/data/__init__.py +5 -5
  21. crawlo/data/user_agents.py +194 -194
  22. crawlo/downloader/__init__.py +273 -273
  23. crawlo/downloader/aiohttp_downloader.py +228 -226
  24. crawlo/downloader/cffi_downloader.py +245 -245
  25. crawlo/downloader/httpx_downloader.py +259 -259
  26. crawlo/downloader/hybrid_downloader.py +212 -212
  27. crawlo/downloader/playwright_downloader.py +402 -402
  28. crawlo/downloader/selenium_downloader.py +472 -472
  29. crawlo/event.py +11 -11
  30. crawlo/exceptions.py +81 -81
  31. crawlo/extension/__init__.py +39 -39
  32. crawlo/extension/health_check.py +141 -141
  33. crawlo/extension/log_interval.py +57 -57
  34. crawlo/extension/log_stats.py +81 -81
  35. crawlo/extension/logging_extension.py +61 -52
  36. crawlo/extension/memory_monitor.py +104 -104
  37. crawlo/extension/performance_profiler.py +133 -133
  38. crawlo/extension/request_recorder.py +107 -107
  39. crawlo/factories/__init__.py +28 -0
  40. crawlo/factories/base.py +69 -0
  41. crawlo/factories/crawler.py +104 -0
  42. crawlo/factories/registry.py +85 -0
  43. crawlo/filters/__init__.py +154 -154
  44. crawlo/filters/aioredis_filter.py +257 -234
  45. crawlo/filters/memory_filter.py +269 -269
  46. crawlo/framework.py +292 -0
  47. crawlo/initialization/__init__.py +40 -0
  48. crawlo/initialization/built_in.py +426 -0
  49. crawlo/initialization/context.py +142 -0
  50. crawlo/initialization/core.py +194 -0
  51. crawlo/initialization/phases.py +149 -0
  52. crawlo/initialization/registry.py +146 -0
  53. crawlo/items/__init__.py +23 -23
  54. crawlo/items/base.py +23 -22
  55. crawlo/items/fields.py +52 -52
  56. crawlo/items/items.py +104 -104
  57. crawlo/logging/__init__.py +38 -0
  58. crawlo/logging/config.py +97 -0
  59. crawlo/logging/factory.py +129 -0
  60. crawlo/logging/manager.py +112 -0
  61. crawlo/middleware/__init__.py +21 -21
  62. crawlo/middleware/default_header.py +132 -132
  63. crawlo/middleware/download_delay.py +104 -104
  64. crawlo/middleware/middleware_manager.py +135 -135
  65. crawlo/middleware/offsite.py +123 -123
  66. crawlo/middleware/proxy.py +386 -386
  67. crawlo/middleware/request_ignore.py +86 -86
  68. crawlo/middleware/response_code.py +163 -163
  69. crawlo/middleware/response_filter.py +136 -136
  70. crawlo/middleware/retry.py +124 -124
  71. crawlo/middleware/simple_proxy.py +65 -65
  72. crawlo/mode_manager.py +212 -187
  73. crawlo/network/__init__.py +21 -21
  74. crawlo/network/request.py +379 -379
  75. crawlo/network/response.py +359 -359
  76. crawlo/pipelines/__init__.py +21 -21
  77. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  78. crawlo/pipelines/console_pipeline.py +39 -39
  79. crawlo/pipelines/csv_pipeline.py +316 -316
  80. crawlo/pipelines/database_dedup_pipeline.py +222 -222
  81. crawlo/pipelines/json_pipeline.py +218 -218
  82. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  83. crawlo/pipelines/mongo_pipeline.py +131 -131
  84. crawlo/pipelines/mysql_pipeline.py +318 -318
  85. crawlo/pipelines/pipeline_manager.py +76 -75
  86. crawlo/pipelines/redis_dedup_pipeline.py +166 -166
  87. crawlo/project.py +327 -325
  88. crawlo/queue/pqueue.py +43 -37
  89. crawlo/queue/queue_manager.py +503 -379
  90. crawlo/queue/redis_priority_queue.py +326 -306
  91. crawlo/settings/__init__.py +7 -7
  92. crawlo/settings/default_settings.py +321 -225
  93. crawlo/settings/setting_manager.py +214 -198
  94. crawlo/spider/__init__.py +657 -639
  95. crawlo/stats_collector.py +73 -59
  96. crawlo/subscriber.py +129 -129
  97. crawlo/task_manager.py +139 -30
  98. crawlo/templates/crawlo.cfg.tmpl +10 -10
  99. crawlo/templates/project/__init__.py.tmpl +3 -3
  100. crawlo/templates/project/items.py.tmpl +17 -17
  101. crawlo/templates/project/middlewares.py.tmpl +118 -118
  102. crawlo/templates/project/pipelines.py.tmpl +96 -96
  103. crawlo/templates/project/settings.py.tmpl +168 -267
  104. crawlo/templates/project/settings_distributed.py.tmpl +167 -180
  105. crawlo/templates/project/settings_gentle.py.tmpl +167 -61
  106. crawlo/templates/project/settings_high_performance.py.tmpl +168 -131
  107. crawlo/templates/project/settings_minimal.py.tmpl +66 -35
  108. crawlo/templates/project/settings_simple.py.tmpl +165 -102
  109. crawlo/templates/project/spiders/__init__.py.tmpl +10 -6
  110. crawlo/templates/run.py.tmpl +34 -38
  111. crawlo/templates/spider/spider.py.tmpl +143 -143
  112. crawlo/templates/spiders_init.py.tmpl +10 -0
  113. crawlo/tools/__init__.py +200 -200
  114. crawlo/tools/anti_crawler.py +268 -268
  115. crawlo/tools/authenticated_proxy.py +240 -240
  116. crawlo/tools/data_formatter.py +225 -225
  117. crawlo/tools/data_validator.py +180 -180
  118. crawlo/tools/date_tools.py +289 -289
  119. crawlo/tools/distributed_coordinator.py +388 -388
  120. crawlo/tools/encoding_converter.py +127 -127
  121. crawlo/tools/network_diagnostic.py +365 -0
  122. crawlo/tools/request_tools.py +82 -82
  123. crawlo/tools/retry_mechanism.py +224 -224
  124. crawlo/tools/scenario_adapter.py +262 -262
  125. crawlo/tools/text_cleaner.py +232 -232
  126. crawlo/utils/__init__.py +34 -34
  127. crawlo/utils/batch_processor.py +259 -259
  128. crawlo/utils/class_loader.py +26 -0
  129. crawlo/utils/controlled_spider_mixin.py +439 -439
  130. crawlo/utils/db_helper.py +343 -343
  131. crawlo/utils/enhanced_error_handler.py +356 -356
  132. crawlo/utils/env_config.py +142 -142
  133. crawlo/utils/error_handler.py +165 -124
  134. crawlo/utils/func_tools.py +82 -82
  135. crawlo/utils/large_scale_config.py +286 -286
  136. crawlo/utils/large_scale_helper.py +344 -344
  137. crawlo/utils/log.py +44 -200
  138. crawlo/utils/performance_monitor.py +285 -285
  139. crawlo/utils/queue_helper.py +175 -175
  140. crawlo/utils/redis_connection_pool.py +388 -351
  141. crawlo/utils/redis_key_validator.py +198 -198
  142. crawlo/utils/request.py +267 -267
  143. crawlo/utils/request_serializer.py +225 -218
  144. crawlo/utils/spider_loader.py +61 -61
  145. crawlo/utils/system.py +11 -11
  146. crawlo/utils/tools.py +4 -4
  147. crawlo/utils/url.py +39 -39
  148. {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/METADATA +1126 -1020
  149. crawlo-1.3.4.dist-info/RECORD +278 -0
  150. examples/__init__.py +7 -7
  151. tests/__init__.py +7 -7
  152. tests/advanced_tools_example.py +275 -275
  153. tests/authenticated_proxy_example.py +107 -107
  154. tests/baidu_performance_test.py +109 -0
  155. tests/baidu_test.py +60 -0
  156. tests/cleaners_example.py +160 -160
  157. tests/comprehensive_framework_test.py +213 -0
  158. tests/comprehensive_test.py +82 -0
  159. tests/comprehensive_testing_summary.md +187 -0
  160. tests/config_validation_demo.py +142 -142
  161. tests/controlled_spider_example.py +205 -205
  162. tests/date_tools_example.py +180 -180
  163. tests/debug_configure.py +70 -0
  164. tests/debug_framework_logger.py +85 -0
  165. tests/debug_log_levels.py +64 -0
  166. tests/debug_pipelines.py +66 -66
  167. tests/distributed_test.py +67 -0
  168. tests/distributed_test_debug.py +77 -0
  169. tests/dynamic_loading_example.py +523 -523
  170. tests/dynamic_loading_test.py +104 -104
  171. tests/env_config_example.py +133 -133
  172. tests/error_handling_example.py +171 -171
  173. tests/final_command_test_report.md +0 -0
  174. tests/final_comprehensive_test.py +152 -0
  175. tests/final_validation_test.py +183 -0
  176. tests/framework_performance_test.py +203 -0
  177. tests/optimized_performance_test.py +212 -0
  178. tests/performance_comparison.py +246 -0
  179. tests/queue_blocking_test.py +114 -0
  180. tests/queue_test.py +90 -0
  181. tests/redis_key_validation_demo.py +130 -130
  182. tests/request_params_example.py +150 -150
  183. tests/response_improvements_example.py +144 -144
  184. tests/scrapy_comparison/ofweek_scrapy.py +139 -0
  185. tests/scrapy_comparison/scrapy_test.py +134 -0
  186. tests/simple_command_test.py +120 -0
  187. tests/simple_crawlo_test.py +128 -0
  188. tests/simple_log_test.py +58 -0
  189. tests/simple_optimization_test.py +129 -0
  190. tests/simple_spider_test.py +50 -0
  191. tests/simple_test.py +48 -0
  192. tests/test_advanced_tools.py +148 -148
  193. tests/test_all_commands.py +231 -0
  194. tests/test_all_redis_key_configs.py +145 -145
  195. tests/test_authenticated_proxy.py +141 -141
  196. tests/test_batch_processor.py +179 -0
  197. tests/test_cleaners.py +54 -54
  198. tests/test_component_factory.py +175 -0
  199. tests/test_comprehensive.py +146 -146
  200. tests/test_config_consistency.py +80 -80
  201. tests/test_config_merge.py +152 -152
  202. tests/test_config_validator.py +182 -182
  203. tests/test_controlled_spider_mixin.py +80 -0
  204. tests/test_crawlo_proxy_integration.py +108 -108
  205. tests/test_date_tools.py +123 -123
  206. tests/test_default_header_middleware.py +158 -158
  207. tests/test_distributed.py +65 -65
  208. tests/test_double_crawlo_fix.py +207 -207
  209. tests/test_double_crawlo_fix_simple.py +124 -124
  210. tests/test_download_delay_middleware.py +221 -221
  211. tests/test_downloader_proxy_compatibility.py +268 -268
  212. tests/test_dynamic_downloaders_proxy.py +124 -124
  213. tests/test_dynamic_proxy.py +92 -92
  214. tests/test_dynamic_proxy_config.py +146 -146
  215. tests/test_dynamic_proxy_real.py +109 -109
  216. tests/test_edge_cases.py +303 -303
  217. tests/test_enhanced_error_handler.py +270 -270
  218. tests/test_enhanced_error_handler_comprehensive.py +246 -0
  219. tests/test_env_config.py +121 -121
  220. tests/test_error_handler_compatibility.py +112 -112
  221. tests/test_factories.py +253 -0
  222. tests/test_final_validation.py +153 -153
  223. tests/test_framework_env_usage.py +103 -103
  224. tests/test_framework_logger.py +67 -0
  225. tests/test_framework_startup.py +65 -0
  226. tests/test_integration.py +169 -169
  227. tests/test_item_dedup_redis_key.py +122 -122
  228. tests/test_large_scale_config.py +113 -0
  229. tests/test_large_scale_helper.py +236 -0
  230. tests/test_mode_change.py +73 -0
  231. tests/test_mode_consistency.py +51 -51
  232. tests/test_offsite_middleware.py +221 -221
  233. tests/test_parsel.py +29 -29
  234. tests/test_performance.py +327 -327
  235. tests/test_performance_monitor.py +116 -0
  236. tests/test_proxy_api.py +264 -264
  237. tests/test_proxy_health_check.py +32 -32
  238. tests/test_proxy_middleware.py +121 -121
  239. tests/test_proxy_middleware_enhanced.py +216 -216
  240. tests/test_proxy_middleware_integration.py +136 -136
  241. tests/test_proxy_middleware_refactored.py +184 -184
  242. tests/test_proxy_providers.py +56 -56
  243. tests/test_proxy_stats.py +19 -19
  244. tests/test_proxy_strategies.py +59 -59
  245. tests/test_queue_empty_check.py +42 -0
  246. tests/test_queue_manager_double_crawlo.py +173 -173
  247. tests/test_queue_manager_redis_key.py +176 -176
  248. tests/test_random_user_agent.py +72 -72
  249. tests/test_real_scenario_proxy.py +195 -195
  250. tests/test_redis_config.py +28 -28
  251. tests/test_redis_connection_pool.py +294 -294
  252. tests/test_redis_key_naming.py +181 -181
  253. tests/test_redis_key_validator.py +123 -123
  254. tests/test_redis_queue.py +224 -224
  255. tests/test_request_ignore_middleware.py +182 -182
  256. tests/test_request_params.py +111 -111
  257. tests/test_request_serialization.py +70 -70
  258. tests/test_response_code_middleware.py +349 -349
  259. tests/test_response_filter_middleware.py +427 -427
  260. tests/test_response_improvements.py +152 -152
  261. tests/test_retry_middleware.py +241 -241
  262. tests/test_scheduler.py +252 -252
  263. tests/test_scheduler_config_update.py +133 -133
  264. tests/test_simple_response.py +61 -61
  265. tests/test_telecom_spider_redis_key.py +205 -205
  266. tests/test_template_content.py +87 -87
  267. tests/test_template_redis_key.py +134 -134
  268. tests/test_tools.py +159 -159
  269. tests/test_user_agents.py +96 -96
  270. tests/tools_example.py +260 -260
  271. tests/untested_features_report.md +139 -0
  272. tests/verify_debug.py +52 -0
  273. tests/verify_distributed.py +117 -117
  274. tests/verify_log_fix.py +112 -0
  275. crawlo-1.3.3.dist-info/RECORD +0 -219
  276. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +0 -82
  277. {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/WHEEL +0 -0
  278. {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/entry_points.txt +0 -0
  279. {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/top_level.txt +0 -0
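The headline change in 1.3.4 is the rewrite of crawlo/crawler.py (1167 lines removed, 639 added) around the packages introduced in this release: crawlo/factories, crawlo/initialization and crawlo/logging, plus the new crawlo/framework.py module. The sketch below shows how those modules appear to fit together, inferred only from the imports and calls visible in the crawler.py diff that follows; the names and signatures are taken from the diff and have not been verified against the released wheel.

    # Sketch inferred from the 1.3.4 crawler.py diff below; treat each call as an assumption.
    from crawlo.factories import get_component_registry            # new package in 1.3.4
    from crawlo.initialization import initialize_framework, is_framework_ready
    from crawlo.logging import get_logger

    logger = get_logger("example")

    # One-time framework setup before any crawler components are built.
    if not is_framework_ready():
        settings = initialize_framework(None)  # the diff passes a SettingManager or None

    # Components are no longer instantiated directly; a registry creates them by name,
    # e.g. registry.create('subscriber') or registry.create('engine', crawler=some_crawler).
    registry = get_component_registry()
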
crawlo/crawler.py CHANGED
@@ -1,1167 +1,639 @@
1
- #!/usr/bin/python
2
- # -*- coding: UTF-8 -*-
3
- """
4
- Crawlo Crawler Module
5
- ====================
6
- Provides crawler process management and runtime core functionality.
7
-
8
- Core Components:
9
- - Crawler: Single crawler runtime instance, managing Spider and engine lifecycle
10
- - CrawlerProcess: Crawler process manager, supporting multi-crawler concurrent scheduling and resource management
11
-
12
- Features:
13
- - Intelligent concurrency control and resource management
14
- - Graceful shutdown and signal handling
15
- - Statistics monitoring and performance tracking
16
- - Automatic module discovery and registration
17
- - Error recovery and retry mechanism
18
- - Large-scale crawler optimization support
19
-
20
- Example Usage:
21
- # Single crawler run
22
- crawler = Crawler(MySpider, settings)
23
- await crawler.crawl()
24
-
25
- # Multi-crawler concurrent management
26
- process = CrawlerProcess()
27
- await process.crawl([Spider1, Spider2])
28
- """
29
- from __future__ import annotations
30
- import asyncio
31
- import signal
32
- import time
33
- import threading
34
- from typing import Type, Optional, Set, List, Union, Dict, Any
35
- from .spider import Spider, get_global_spider_registry
36
- from .core.engine import Engine
37
- from .subscriber import Subscriber
38
- from .extension import ExtensionManager
39
- from crawlo.utils.log import get_logger
40
- from .stats_collector import StatsCollector
41
- from .event import spider_opened, spider_closed
42
- from .settings.setting_manager import SettingManager
43
- from crawlo.project import merge_settings, get_settings
44
-
45
- logger = get_logger(__name__)
46
-
47
-
48
- class CrawlerContext:
49
- """
50
- Crawler context manager
51
- Provides shared state and resource management
52
- """
53
-
54
- def __init__(self):
55
- self.start_time = time.time()
56
- self.total_crawlers = 0
57
- self.active_crawlers = 0
58
- self.completed_crawlers = 0
59
- self.failed_crawlers = 0
60
- self.error_log = []
61
- self._lock = threading.RLock()
62
-
63
- def increment_total(self):
64
- with self._lock:
65
- self.total_crawlers += 1
66
-
67
- def increment_active(self):
68
- with self._lock:
69
- self.active_crawlers += 1
70
-
71
- def decrement_active(self):
72
- with self._lock:
73
- self.active_crawlers -= 1
74
-
75
- def increment_completed(self):
76
- with self._lock:
77
- self.completed_crawlers += 1
78
-
79
- def increment_failed(self, error: str):
80
- with self._lock:
81
- self.failed_crawlers += 1
82
- self.error_log.append({
83
- 'timestamp': time.time(),
84
- 'error': error
85
- })
86
-
87
- def get_stats(self) -> Dict[str, Any]:
88
- with self._lock:
89
- duration = time.time() - self.start_time
90
- return {
91
- 'total_crawlers': self.total_crawlers,
92
- 'active_crawlers': self.active_crawlers,
93
- 'completed_crawlers': self.completed_crawlers,
94
- 'failed_crawlers': self.failed_crawlers,
95
- 'success_rate': (self.completed_crawlers / max(1, self.total_crawlers)) * 100,
96
- 'duration_seconds': round(duration, 2),
97
- 'error_count': len(self.error_log)
98
- }
99
-
100
-
101
- class Crawler:
102
- """
103
- Single crawler runtime instance, managing Spider and engine lifecycle
104
-
105
- Provides functionality:
106
- - Spider lifecycle management (initialization, running, closing)
107
- - Engine component coordination management
108
- - Configuration merging and validation
109
- - Statistics data collection
110
- - Extension management
111
- - Exception handling and cleanup
112
- """
113
-
114
- def __init__(
115
- self,
116
- spider_cls: Type[Spider],
117
- settings: SettingManager,
118
- context: Optional[CrawlerContext] = None
119
- ):
120
- self.spider_cls = spider_cls
121
- self.spider: Optional[Spider] = None
122
- self.engine: Optional[Engine] = None
123
- self.stats: Optional[StatsCollector] = None
124
- self.subscriber: Optional[Subscriber] = None
125
- self.extension: Optional[ExtensionManager] = None
126
- self.settings: SettingManager = settings.copy()
127
- self.context = context or CrawlerContext()
128
-
129
- # State management
130
- self._closed = False
131
- self._close_lock = asyncio.Lock()
132
- self._start_time = None
133
- self._end_time = None
134
-
135
- # Performance monitoring
136
- self._performance_metrics = {
137
- 'initialization_time': 0,
138
- 'crawl_duration': 0,
139
- 'memory_peak': 0,
140
- 'request_count': 0,
141
- 'error_count': 0
142
- }
143
-
144
- # Initialize components
145
- self.subscriber = self._create_subscriber()
146
- self.spider = self._create_spider()
147
- self.engine = self._create_engine()
148
- self.stats = self._create_stats()
149
- # Note: Do not initialize extension manager here, let it initialize in the engine
150
-
151
- # Validate crawler state
152
- self._validate_crawler_state()
153
-
154
- # 将启动信息的打印移到crawl方法中,避免在CrawlerProcess中重复打印
155
- # self._log_startup_info()
156
-
157
- # 将启动爬虫名称的日志移到这里,确保在日志系统配置之后打印
158
- # logger.info(f"Starting running {self.spider.name}")
159
-
160
- async def crawl(self):
161
- """
162
- Start the crawler core process
163
-
164
- Includes the following stages:
165
- 1. Initialization stage: Create all components
166
- 2. Validation stage: Check configuration and state
167
- 3. Running stage: Start the crawler engine
168
- 4. Cleanup stage: Resource release
169
- """
170
- init_start = time.time()
171
- self._start_time = init_start
172
-
173
- try:
174
- # Update context status
175
- self.context.increment_active()
176
-
177
- # Phase 1: Initialize components
178
- # Adjust component initialization order to ensure log output order meets requirements
179
- self.subscriber = self._create_subscriber()
180
- self.spider = self._create_spider()
181
- self.engine = self._create_engine()
182
- self.stats = self._create_stats()
183
- # Note: Do not initialize extension manager here, let it initialize in the engine
184
-
185
- # Record initialization time
186
- self._performance_metrics['initialization_time'] = time.time() - init_start
187
-
188
- # Phase 2: Validate state
189
- self._validate_crawler_state()
190
-
191
- # Phase 3: Display runtime configuration summary
192
- self._log_runtime_summary()
193
-
194
- # Phase 4: Start crawler
195
- crawl_start = time.time()
196
- await self.engine.start_spider(self.spider)
197
-
198
- # Record crawl time
199
- self._performance_metrics['crawl_duration'] = time.time() - crawl_start
200
- self._end_time = time.time()
201
-
202
- # Update context status
203
- self.context.increment_completed()
204
-
205
- logger.info(f"Spider {self.spider.name} completed, took {self._get_total_duration():.2f} seconds")
206
-
207
- except Exception as e:
208
- self._performance_metrics['error_count'] += 1
209
- self.context.increment_failed(str(e))
210
- logger.error(f"Spider {getattr(self.spider, 'name', 'Unknown')} failed to run: {e}", exc_info=True)
211
- raise
212
- finally:
213
- self.context.decrement_active()
214
- # Ensure resource cleanup
215
- await self._ensure_cleanup()
216
-
217
- def _log_runtime_summary(self):
218
- """Log runtime configuration summary"""
219
- # Get spider name
220
- spider_name = getattr(self.spider, 'name', 'Unknown')
221
-
222
- # Ensure spider name is a string and strip leading/trailing whitespace
223
- if spider_name:
224
- spider_name = str(spider_name).strip()
225
- else:
226
- spider_name = 'Unknown'
227
-
228
- logger.info(f"Starting running {spider_name}")
229
-
230
- def _validate_crawler_state(self):
231
- """
232
- Validate crawler state and configuration
233
- Ensure all necessary components are properly initialized
234
- """
235
- if not self.spider:
236
- raise RuntimeError("Spider instance not initialized")
237
- if not self.engine:
238
- raise RuntimeError("Engine not initialized")
239
- if not self.stats:
240
- raise RuntimeError("Stats collector not initialized")
241
- if not self.subscriber:
242
- raise RuntimeError("Event subscriber not initialized")
243
-
244
- # Check key configuration
245
- if not self.spider.name:
246
- raise ValueError("Spider name cannot be empty")
247
-
248
- logger.debug(f"Spider {self.spider.name} state validation passed")
249
-
250
- def _get_total_duration(self) -> float:
251
- """Get total runtime"""
252
- if self._start_time and self._end_time:
253
- return self._end_time - self._start_time
254
- return 0.0
255
-
256
- def _log_startup_info(self):
257
- """Print startup information, including run mode and key configuration checks"""
258
- # Get run mode
259
- run_mode = self.settings.get('RUN_MODE', 'standalone')
260
-
261
- # Get version number
262
- version = self.settings.get('VERSION', '1.0.0')
263
- if not version or version == 'None':
264
- version = '1.0.0'
265
-
266
- # Print framework start info
267
- logger.info(f"Crawlo Framework Started {version}")
268
-
269
- # Add mode info if available
270
- mode_info = self.settings.get('_mode_info')
271
- if mode_info:
272
- logger.info(mode_info)
273
- else:
274
- # 如果没有_mode_info,添加默认信息
275
- logger.info("使用单机模式 - 简单快速,适合开发和中小规模爬取")
276
-
277
- # Get actual queue type
278
- queue_type = self.settings.get('QUEUE_TYPE', 'memory')
279
-
280
- # Display information based on run mode and queue type combination
281
- if run_mode == 'distributed':
282
- logger.info("Run Mode: distributed")
283
- logger.info("Distributed Mode - Multi-node collaboration supported")
284
- # Show Redis configuration
285
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
286
- redis_port = self.settings.get('REDIS_PORT', 6379)
287
- logger.info(f"Redis Address: {redis_host}:{redis_port}")
288
- elif run_mode == 'standalone':
289
- if queue_type == 'redis':
290
- logger.info("Run Mode: standalone+redis")
291
- # Show Redis configuration
292
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
293
- redis_port = self.settings.get('REDIS_PORT', 6379)
294
- logger.info(f"Redis Address: {redis_host}:{redis_port}")
295
- elif queue_type == 'auto':
296
- logger.info("Run Mode: standalone+auto")
297
- else: # memory
298
- logger.info("Run Mode: standalone")
299
- else:
300
- logger.info(f"Run Mode: {run_mode}")
301
-
302
- async def _ensure_cleanup(self):
303
- """Ensure resource cleanup"""
304
- try:
305
- if not self._closed:
306
- await self.close()
307
- except Exception as e:
308
- logger.warning(f"Error cleaning up resources: {e}")
309
-
310
- def get_performance_metrics(self) -> Dict[str, Any]:
311
- """Get performance metrics"""
312
- metrics = self._performance_metrics.copy()
313
- metrics['total_duration'] = self._get_total_duration()
314
- if self.stats:
315
- # Add statistics data
316
- stats_data = getattr(self.stats, 'get_stats', lambda: {})()
317
- metrics.update(stats_data)
318
- return metrics
319
-
320
- @staticmethod
321
- def _create_subscriber() -> Subscriber:
322
- """Create event subscriber"""
323
- return Subscriber()
324
-
325
- def _create_spider(self) -> Spider:
326
- """
327
- Create and validate spider instance (enhanced version)
328
-
329
- Performs the following validations:
330
- - Spider name must exist
331
- - start_requests method must be callable
332
- - start_urls cannot be a string
333
- - parse method is recommended to exist
334
- """
335
- spider = self.spider_cls.create_instance(self)
336
-
337
- # Required attribute check
338
- if not getattr(spider, 'name', None):
339
- raise AttributeError(
340
- f"Spider class '{self.spider_cls.__name__}' must define 'name' attribute.\n"
341
- f"Example: name = 'my_spider'"
342
- )
343
-
344
- if not callable(getattr(spider, 'start_requests', None)):
345
- raise AttributeError(
346
- f"Spider '{spider.name}' must implement a callable 'start_requests' method.\n"
347
- f"Example: def start_requests(self): yield Request(url='...')"
348
- )
349
-
350
- # start_urls type check
351
- start_urls = getattr(spider, 'start_urls', [])
352
- if isinstance(start_urls, str):
353
- raise TypeError(
354
- f"Spider '{spider.name}' 'start_urls' must be a list or tuple, not a string.\n"
355
- f"Correct: start_urls = ['http://example.com']\n"
356
- f"Incorrect: start_urls = 'http://example.com'"
357
- )
358
-
359
- # parse method check (warning instead of error)
360
- if not callable(getattr(spider, 'parse', None)):
361
- logger.warning(
362
- f"Spider '{spider.name}' does not define 'parse' method.\n"
363
- f"Ensure all Requests specify a callback function, otherwise responses will be ignored."
364
- )
365
-
366
- # Set spider configuration
367
- self._set_spider(spider)
368
-
369
- logger.debug(f"Spider '{spider.name}' initialized successfully")
370
- return spider
371
-
372
- def _create_engine(self) -> Engine:
373
- """Create and initialize engine"""
374
- engine = Engine(self)
375
- engine.engine_start()
376
- logger.debug(f"Engine initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
377
- return engine
378
-
379
- def _create_stats(self) -> StatsCollector:
380
- """Create stats collector"""
381
- stats = StatsCollector(self)
382
- logger.debug(
383
- f"Stats collector initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
384
- return stats
385
-
386
- def _create_extension(self) -> ExtensionManager:
387
- """Create extension manager"""
388
- # Modify extension manager creation method, delay initialization until needed
389
- extension = ExtensionManager.create_instance(self)
390
- logger.debug(
391
- f"Extension manager initialized successfully, spider: {getattr(self.spider, 'name', 'Unknown')}")
392
- return extension
393
-
394
- def _set_spider(self, spider: Spider):
395
- """
396
- Set spider configuration and event subscription
397
- Bind spider lifecycle events with subscriber
398
- """
399
- # Subscribe to spider lifecycle events
400
- self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
401
- self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
402
-
403
- # Merge spider custom configuration
404
- merge_settings(spider, self.settings)
405
-
406
- logger.debug(f"Spider '{spider.name}' configuration merged successfully")
407
-
408
- async def close(self, reason='finished') -> None:
409
- """
410
- Close crawler and clean up resources (enhanced version)
411
-
412
- Ensure closing only once and handle all cleanup operations
413
- """
414
- async with self._close_lock:
415
- if self._closed:
416
- return
417
-
418
- self._closed = True
419
- self._end_time = time.time()
420
-
421
- try:
422
- # Notify spider close event
423
- if self.subscriber:
424
- await self.subscriber.notify(spider_closed)
425
-
426
- # Statistics data collection
427
- if self.stats and self.spider:
428
- self.stats.close_spider(spider=self.spider, reason=reason)
429
- # Record statistics data
430
- try:
431
- from crawlo.commands.stats import record_stats
432
- record_stats(self)
433
- except ImportError:
434
- logger.debug("Statistics recording module does not exist, skipping statistics recording")
435
-
436
- logger.info(
437
- f"Spider '{getattr(self.spider, 'name', 'Unknown')}' closed, "
438
- f"reason: {reason}, took: {self._get_total_duration():.2f} seconds"
439
- )
440
-
441
- except Exception as e:
442
- logger.error(f"Error closing crawler: {e}", exc_info=True)
443
- finally:
444
- # Ensure resource cleanup
445
- await self._cleanup_resources()
446
-
447
- async def _cleanup_resources(self):
448
- """Clean up all resources"""
449
- cleanup_tasks = []
450
-
451
- # Engine cleanup
452
- if self.engine:
453
- try:
454
- cleanup_tasks.append(self.engine.close())
455
- except AttributeError:
456
- pass # Engine has no close method
457
-
458
- # Extension cleanup
459
- if self.extension:
460
- try:
461
- cleanup_tasks.append(self.extension.close())
462
- except AttributeError:
463
- pass
464
-
465
- # Stats collector cleanup
466
- if self.stats:
467
- try:
468
- cleanup_tasks.append(self.stats.close())
469
- except AttributeError:
470
- pass
471
-
472
- # Concurrently execute cleanup tasks
473
- if cleanup_tasks:
474
- await asyncio.gather(*cleanup_tasks, return_exceptions=True)
475
-
476
- logger.debug("Resource cleanup completed")
477
-
478
-
479
- class CrawlerProcess:
480
- """
481
- Crawler process manager
482
-
483
- Supported features:
484
- - Multi-crawler concurrent scheduling and resource management
485
- - Automatic module discovery and spider registration
486
- - Intelligent concurrency control and load balancing
487
- - Graceful shutdown and signal handling
488
- - Real-time status monitoring and statistics
489
- - Error recovery and retry mechanism
490
- - Large-scale crawler optimization support
491
-
492
- Usage example:
493
- # Basic usage
494
- process = CrawlerProcess()
495
- await process.crawl(MySpider)
496
-
497
- # Multi-crawler concurrency
498
- await process.crawl([Spider1, Spider2, 'spider_name'])
499
-
500
- # Custom concurrency
501
- process = CrawlerProcess(max_concurrency=8)
502
- """
503
-
504
- def __init__(
505
- self,
506
- settings: Optional[SettingManager] = None,
507
- max_concurrency: Optional[int] = None,
508
- spider_modules: Optional[List[str]] = None,
509
- enable_monitoring: bool = True
510
- ):
511
- # Basic configuration
512
- self.settings: SettingManager = settings or self._get_default_settings()
513
- self.crawlers: Set[Crawler] = set()
514
- self._active_tasks: Set[asyncio.Task] = set()
515
-
516
- # Context manager
517
- self.context = CrawlerContext()
518
-
519
- # Concurrency control configuration
520
- self.max_concurrency: int = (
521
- max_concurrency
522
- or self.settings.get('MAX_RUNNING_SPIDERS')
523
- or self.settings.get('CONCURRENCY', 3)
524
- )
525
- self.semaphore = asyncio.Semaphore(self.max_concurrency)
526
-
527
- # Monitoring configuration
528
- self.enable_monitoring = enable_monitoring
529
- self._monitoring_task = None
530
- self._shutdown_event = asyncio.Event()
531
-
532
- # Automatically discover and import spider modules
533
- if spider_modules:
534
- self.auto_discover(spider_modules)
535
-
536
- # Use snapshot of global registry (avoid subsequent import impact)
537
- self._spider_registry: Dict[str, Type[Spider]] = get_global_spider_registry()
538
-
539
- # Performance monitoring
540
- self._performance_stats = {
541
- 'total_requests': 0,
542
- 'successful_requests': 0,
543
- 'failed_requests': 0,
544
- 'memory_usage_mb': 0,
545
- 'cpu_usage_percent': 0
546
- }
547
-
548
- # Register signal handlers
549
- signal.signal(signal.SIGINT, self._shutdown)
550
- signal.signal(signal.SIGTERM, self._shutdown)
551
-
552
- def _log_startup_info(self):
553
- """Print startup information, including run mode and key configuration checks"""
554
- # Get run mode
555
- run_mode = self.settings.get('RUN_MODE', 'standalone')
556
-
557
- # Get version number
558
- version = self.settings.get('VERSION', '1.0.0')
559
- if not version or version == 'None':
560
- version = '1.0.0'
561
-
562
- # Print framework start info
563
- logger.info(f"Crawlo Framework Started {version}")
564
-
565
- # Add mode info if available
566
- mode_info = self.settings.get('_mode_info')
567
- if mode_info:
568
- logger.info(mode_info)
569
- else:
570
- # If _mode_info is missing, log a default message
275
- logger.info("Standalone mode - simple and fast, suited to development and small-to-medium crawls")
572
-
573
- # Get actual queue type
574
- queue_type = self.settings.get('QUEUE_TYPE', 'memory')
575
-
576
- # Display information based on run mode and queue type combination
577
- if run_mode == 'distributed':
578
- logger.info("Run Mode: distributed")
579
- logger.info("Distributed Mode - Multi-node collaboration supported")
580
- # Show Redis configuration
581
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
582
- redis_port = self.settings.get('REDIS_PORT', 6379)
583
- logger.info(f"Redis Address: {redis_host}:{redis_port}")
584
- elif run_mode == 'standalone':
585
- if queue_type == 'redis':
586
- logger.info("Run Mode: standalone+redis")
587
- # Show Redis configuration
588
- redis_host = self.settings.get('REDIS_HOST', 'localhost')
589
- redis_port = self.settings.get('REDIS_PORT', 6379)
590
- logger.info(f"Redis Address: {redis_host}:{redis_port}")
591
- elif queue_type == 'auto':
592
- logger.info("Run Mode: standalone+auto")
593
- else: # memory
594
- logger.info("Run Mode: standalone")
595
- else:
596
- logger.info(f"Run Mode: {run_mode}")
597
-
598
- logger.debug(
599
- f"CrawlerProcess initialized successfully\n"
600
- f" - Max concurrent crawlers: {self.max_concurrency}\n"
601
- f" - Registered crawlers: {len(self._spider_registry)}\n"
602
- f" - Monitoring enabled: {self.enable_monitoring}"
603
- )
604
-
605
- async def start_monitoring(self):
606
- """Start monitoring task"""
607
- if not self.enable_monitoring:
608
- return
609
-
610
- self._monitoring_task = asyncio.create_task(self._monitor_loop())
611
- logger.debug("Monitoring task started")
612
-
613
- async def stop_monitoring(self):
614
- """Stop monitoring task"""
615
- if self._monitoring_task and not self._monitoring_task.done():
616
- self._monitoring_task.cancel()
617
- try:
618
- await self._monitoring_task
619
- except asyncio.CancelledError:
620
- pass
621
- logger.debug("Monitoring task stopped")
622
-
623
- async def _monitor_loop(self):
624
- """Monitoring loop, periodically collect and report status"""
625
- try:
626
- while not self._shutdown_event.is_set():
627
- await self._collect_performance_stats()
628
-
629
- # Output status every 30 seconds
630
- stats = self.context.get_stats()
631
- if stats['active_crawlers'] > 0:
632
- logger.debug(
633
- f"Crawler status: Active {stats['active_crawlers']}, "
634
- f"Completed {stats['completed_crawlers']}, "
635
- f"Failed {stats['failed_crawlers']}, "
636
- f"Success rate {stats['success_rate']:.1f}%"
637
- )
638
-
639
- await asyncio.sleep(30) # 30 second interval
640
-
641
- except asyncio.CancelledError:
642
- logger.debug("Monitoring loop cancelled")
643
- except Exception as e:
644
- logger.error(f"Monitoring loop error: {e}", exc_info=True)
645
-
646
- async def _collect_performance_stats(self):
647
- """Collect performance statistics data"""
648
- try:
649
- import psutil
650
- import os
651
-
652
- process = psutil.Process(os.getpid())
653
- memory_info = process.memory_info()
654
-
655
- self._performance_stats.update({
656
- 'memory_usage_mb': round(memory_info.rss / 1024 / 1024, 2),
657
- 'cpu_usage_percent': round(process.cpu_percent(), 2)
658
- })
659
-
660
- except ImportError:
661
- # Skip performance monitoring when psutil is not available
662
- pass
663
- except Exception as e:
664
- logger.debug(f"Failed to collect performance statistics: {e}")
665
-
666
- @staticmethod
667
- def auto_discover(modules: List[str]):
668
- """
669
- Automatically import modules, trigger Spider class definition and registration (enhanced version)
670
-
671
- Supports recursive scanning and error recovery
672
- """
673
- import importlib
674
- import pkgutil
675
-
676
- discovered_count = 0
677
- error_count = 0
678
-
679
- for module_name in modules:
680
- try:
681
- module = importlib.import_module(module_name)
682
-
683
- if hasattr(module, '__path__'):
684
- # Package module, recursive scanning
685
- for _, name, _ in pkgutil.walk_packages(module.__path__, module.__name__ + "."):
686
- try:
687
- importlib.import_module(name)
688
- discovered_count += 1
689
- except Exception as sub_e:
690
- error_count += 1
691
- logger.warning(f"Failed to import submodule {name}: {sub_e}")
692
- else:
693
- # Single module
694
- importlib.import_module(module_name)
695
- discovered_count += 1
696
-
697
- logger.debug(f"Module scanned: {module_name}")
698
-
699
- except Exception as e:
700
- error_count += 1
701
- logger.error(f"Failed to scan module {module_name}: {e}", exc_info=True)
702
-
703
- logger.debug(
704
- f"Spider registration completed: {discovered_count} succeeded, {error_count} failed"
705
- )
706
-
707
- # === Public read-only interface: Avoid direct access to _spider_registry ===
708
-
709
- def get_spider_names(self) -> List[str]:
710
- """Get all registered spider names"""
711
- return list(self._spider_registry.keys())
712
-
713
- def get_spider_class(self, name: str) -> Optional[Type[Spider]]:
714
- """Get spider class by name"""
715
- return self._spider_registry.get(name)
716
-
717
- def is_spider_registered(self, name: str) -> bool:
718
- """Check if a name is registered"""
719
- return name in self._spider_registry
720
-
721
- async def crawl(self, spiders: Union[Type[Spider], str, List[Union[Type[Spider], str]]]):
722
- """
723
- Start one or more crawlers
724
-
725
- Enhanced features:
726
- - Intelligent concurrency control
727
- - Real-time monitoring and statistics
728
- - Error recovery and retry
729
- - Graceful shutdown handling
730
- """
731
- # Phase 1: Preprocessing and validation
732
- spider_classes_to_run = self._resolve_spiders_to_run(spiders)
733
- total = len(spider_classes_to_run)
734
-
735
- if total == 0:
736
- raise ValueError("At least one spider class or name must be provided")
737
-
738
- # Print the startup info here so it is emitted after the logging system has been configured
740
- # Calling _log_startup_info here ensures the framework startup banner is written to the log file
740
- self._log_startup_info()
741
-
742
- # Phase 2: Initialize context and monitoring
743
- for _ in range(total):
744
- self.context.increment_total()
745
-
746
- # Start monitoring task
747
- await self.start_monitoring()
748
-
749
- try:
750
- # Phase 3: Initialize context and monitoring
751
- spider_classes_to_run.sort(key=lambda cls: cls.__name__.lower())
752
-
753
- logger.debug(
754
- f"Starting {total} crawlers\n"
755
- f" - Max concurrency: {self.max_concurrency}\n"
756
- f" - Spider list: {[cls.__name__ for cls in spider_classes_to_run]}"
757
- )
758
-
759
- # Phase 4: Stream start all crawler tasks
760
- tasks = [
761
- asyncio.create_task(
762
- self._run_spider_with_limit(spider_cls, index + 1, total),
763
- name=f"spider-{spider_cls.__name__}-{index + 1}"
764
- )
765
- for index, spider_cls in enumerate(spider_classes_to_run)
766
- ]
767
-
768
- # Phase 5: Wait for all tasks to complete (failures do not interrupt)
769
- results = await asyncio.gather(*tasks, return_exceptions=True)
770
-
771
- # Phase 6: Statistics exceptions and results
772
- failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
773
- successful = total - len(failed)
774
-
775
- if failed:
776
- failed_spiders = [spider_classes_to_run[i].__name__ for i in failed]
777
- logger.error(
778
- f"Crawler execution result: {successful}/{total} succeeded, {len(failed)}/{total} failed\n"
779
- f" - Failed crawlers: {failed_spiders}"
780
- )
781
-
782
- # Record detailed error information
783
- for i in failed:
784
- error = results[i]
785
- logger.error(f"Spider {spider_classes_to_run[i].__name__} error details: {error}")
786
- else:
787
- logger.info(f"All {total} crawlers completed successfully!")
788
-
789
- # Return statistics results
790
- return {
791
- 'total': total,
792
- 'successful': successful,
793
- 'failed': len(failed),
794
- 'success_rate': (successful / total) * 100 if total > 0 else 0,
795
- 'context_stats': self.context.get_stats()
796
- }
797
-
798
- finally:
799
- # Phase 7: Cleanup and shutdown
800
- await self.stop_monitoring()
801
- await self._cleanup_process()
802
-
803
- async def _cleanup_process(self):
804
- """Clean up process resources"""
805
- try:
806
- # Wait for all active crawlers to complete
807
- if self.crawlers:
808
- close_tasks = [crawler.close() for crawler in self.crawlers]
809
- await asyncio.gather(*close_tasks, return_exceptions=True)
810
- self.crawlers.clear()
811
-
812
- # Clean up active tasks
813
- if self._active_tasks:
814
- for task in list(self._active_tasks):
815
- if not task.done():
816
- task.cancel()
817
- await asyncio.gather(*self._active_tasks, return_exceptions=True)
818
- self._active_tasks.clear()
819
-
820
- logger.debug("Process resources cleanup completed")
821
-
822
- except Exception as e:
823
- logger.error(f"Error cleaning up process resources: {e}", exc_info=True)
824
-
825
- def get_process_stats(self) -> Dict[str, Any]:
826
- """Get process statistics information"""
827
- context_stats = self.context.get_stats()
828
-
829
- return {
830
- 'context': context_stats,
831
- 'performance': self._performance_stats.copy(),
832
- 'crawlers': {
833
- 'total_registered': len(self._spider_registry),
834
- 'active_crawlers': len(self.crawlers),
835
- 'max_concurrency': self.max_concurrency
836
- },
837
- 'registry': {
838
- 'spider_names': list(self._spider_registry.keys()),
839
- 'spider_classes': [cls.__name__ for cls in self._spider_registry.values()]
840
- }
841
- }
842
-
843
- def _resolve_spiders_to_run(
844
- self,
845
- spiders_input: Union[Type[Spider], str, List[Union[Type[Spider], str]]]
846
- ) -> List[Type[Spider]]:
847
- """
848
- Resolve input to spider class list
849
-
850
- Supports various input formats and validates uniqueness
851
- """
852
- inputs = self._normalize_inputs(spiders_input)
853
- seen_spider_names: Set[str] = set()
854
- spider_classes: List[Type[Spider]] = []
855
-
856
- for item in inputs:
857
- try:
858
- spider_cls = self._resolve_spider_class(item)
859
- spider_name = getattr(spider_cls, 'name', None)
860
-
861
- if not spider_name:
862
- raise ValueError(f"Spider class {spider_cls.__name__} missing 'name' attribute")
863
-
864
- if spider_name in seen_spider_names:
865
- raise ValueError(
866
- f"Duplicate spider name '{spider_name}' in this run.\n"
867
- f"Ensure each spider's name attribute is unique in this run."
868
- )
869
-
870
- seen_spider_names.add(spider_name)
871
- spider_classes.append(spider_cls)
872
-
873
- logger.debug(
874
- f"Spider resolved successfully: {item} -> {spider_cls.__name__} (name='{spider_name}')")
875
-
876
- except Exception as e:
877
- logger.error(f"Failed to resolve spider: {item} - {e}")
878
- raise
879
-
880
- return spider_classes
881
-
882
- @staticmethod
883
- def _normalize_inputs(spiders_input) -> List[Union[Type[Spider], str]]:
884
- """
885
- Normalize input to list
886
-
887
- Supports more input types and provides better error information
888
- """
889
- if isinstance(spiders_input, (type, str)):
890
- return [spiders_input]
891
- elif isinstance(spiders_input, (list, tuple, set)):
892
- spider_list = list(spiders_input)
893
- if not spider_list:
894
- raise ValueError("Spider list cannot be empty")
895
- return spider_list
896
- else:
897
- raise TypeError(
898
- f"Unsupported spiders parameter type: {type(spiders_input)}\n"
899
- f"Supported types: Spider class, name string, or their list/tuple/set"
900
- )
901
-
902
- def _resolve_spider_class(self, item: Union[Type[Spider], str]) -> Type[Spider]:
903
- """
904
- Resolve single input item to spider class
905
-
906
- Provides better error prompts and debugging information
907
- """
908
- if isinstance(item, type) and issubclass(item, Spider):
909
- # Direct Spider class
910
- return item
911
- elif isinstance(item, str):
912
- # String name, need to look up registry
913
- spider_cls = self._spider_registry.get(item)
914
- if not spider_cls:
915
- available_spiders = list(self._spider_registry.keys())
916
- raise ValueError(
917
- f"Spider named '{item}' not found.\n"
918
- f"Registered spiders: {available_spiders}\n"
919
- f"Please check if the spider name is correct, or ensure the spider has been properly imported and registered."
920
- )
921
- return spider_cls
922
- else:
923
- raise TypeError(
924
- f"Invalid type {type(item)}: {item}\n"
925
- f"Must be Spider class or string name.\n"
926
- f"Example: MySpider or 'my_spider'"
927
- )
928
-
929
- async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
930
- """
931
- Spider running function limited by semaphore
932
-
933
- Includes enhanced error handling and monitoring functionality
934
- """
935
- task = asyncio.current_task()
936
- crawler = None
937
-
938
- try:
939
- # Register task
940
- if task:
941
- self._active_tasks.add(task)
942
-
943
- # Acquire concurrency permit
944
- await self.semaphore.acquire()
945
-
946
- # start_msg = f"[{seq}/{total}] Initializing spider: {spider_cls.__name__}"
947
- # logger.info(start_msg)
948
-
949
- # Create and run crawler
950
- crawler = Crawler(spider_cls, self.settings, self.context)
951
- self.crawlers.add(crawler)
952
-
953
- # Record start time
954
- start_time = time.time()
955
-
956
- # Run crawler
957
- await crawler.crawl()
958
-
959
- # Calculate runtime
960
- duration = time.time() - start_time
961
-
962
- end_msg = (
963
- f"[{seq}/{total}] Crawler completed: {spider_cls.__name__}, "
964
- f"took: {duration:.2f} seconds"
965
- )
966
- logger.info(end_msg)
967
-
968
- # Record success statistics
969
- self._performance_stats['successful_requests'] += 1
970
-
971
- except Exception as e:
972
- # Record failure statistics
973
- self._performance_stats['failed_requests'] += 1
974
-
975
- error_msg = f"Spider {spider_cls.__name__} execution failed: {e}"
976
- logger.error(error_msg, exc_info=True)
977
-
978
- # Record error information to context
979
- if hasattr(self, 'context'):
980
- self.context.increment_failed(error_msg)
981
-
982
- raise
983
- finally:
984
- # Clean up resources
985
- try:
986
- if crawler and crawler in self.crawlers:
987
- self.crawlers.remove(crawler)
988
-
989
- if task and task in self._active_tasks:
990
- self._active_tasks.remove(task)
991
-
992
- self.semaphore.release()
993
-
994
- except Exception as cleanup_error:
995
- logger.warning(f"Error cleaning up resources: {cleanup_error}")
996
-
997
- def _shutdown(self, _signum, _frame):
998
- """
999
- Graceful shutdown signal handling
1000
-
1001
- Provides better shutdown experience and resource cleanup
1002
- """
1003
- signal_name = {signal.SIGINT: 'SIGINT', signal.SIGTERM: 'SIGTERM'}.get(_signum, str(_signum))
1004
- logger.warning(f"Received shutdown signal {signal_name}, stopping all crawlers...")
1005
-
1006
- # Set shutdown event
1007
- if hasattr(self, '_shutdown_event'):
1008
- self._shutdown_event.set()
1009
-
1010
- # Stop all crawler engines
1011
- for crawler in list(self.crawlers):
1012
- if crawler.engine:
1013
- crawler.engine.running = False
1014
- crawler.engine.normal = False
1015
- logger.debug(f"Crawler engine stopped: {getattr(crawler.spider, 'name', 'Unknown')}")
1016
-
1017
- # Create shutdown task
1018
- asyncio.create_task(self._wait_for_shutdown())
1019
-
1020
- logger.info("Shutdown command sent, waiting for crawlers to complete current tasks...")
1021
-
1022
- async def _wait_for_shutdown(self):
1023
- """
1024
- Wait for all active tasks to complete
1025
-
1026
- Provides better shutdown time control and progress feedback
1027
- """
1028
- try:
1029
- # Stop monitoring task
1030
- await self.stop_monitoring()
1031
-
1032
- # Wait for active tasks to complete
1033
- pending = [t for t in self._active_tasks if not t.done()]
1034
-
1035
- if pending:
1036
- logger.info(
1037
- f"Waiting for {len(pending)} active tasks to complete..."
1038
- f"(Maximum wait time: 30 seconds)"
1039
- )
1040
-
1041
- # Set timeout
1042
- try:
1043
- await asyncio.wait_for(
1044
- asyncio.gather(*pending, return_exceptions=True),
1045
- timeout=30.0
1046
- )
1047
- except asyncio.TimeoutError:
1048
- logger.warning("Some tasks timed out, forcing cancellation...")
1049
-
1050
- # Force cancel timed out tasks
1051
- for task in pending:
1052
- if not task.done():
1053
- task.cancel()
1054
-
1055
- # Wait for cancellation to complete
1056
- await asyncio.gather(*pending, return_exceptions=True)
1057
-
1058
- # Final cleanup
1059
- await self._cleanup_process()
1060
-
1061
- # Output final statistics
1062
- final_stats = self.context.get_stats()
1063
- logger.info(
1064
- f"All crawlers gracefully shut down 👋\n"
1065
- f" - Total crawlers: {final_stats['total_crawlers']}\n"
1066
- f" - Successfully completed: {final_stats['completed_crawlers']}\n"
1067
- f" - Failed: {final_stats['failed_crawlers']}\n"
1068
- f" - Success rate: {final_stats['success_rate']:.1f}%\n"
1069
- f" - Total runtime: {final_stats['duration_seconds']} seconds"
1070
- )
1071
-
1072
- except Exception as e:
1073
- logger.error(f"Error during shutdown process: {e}", exc_info=True)
1074
-
1075
- @classmethod
1076
- def _get_default_settings(cls) -> SettingManager:
1077
- """
1078
- Load default configuration
1079
-
1080
- Provides better error handling and fallback strategy
1081
- """
1082
- try:
1083
- settings = get_settings()
1084
- logger.debug("Default configuration loaded successfully")
1085
- return settings
1086
- except Exception as e:
1087
- logger.warning(f"Unable to load default configuration: {e}, using empty configuration")
1088
- return SettingManager()
1089
-
1090
- # === Utility functions ===
1091
-
1092
- def create_crawler_with_optimizations(
1093
- spider_cls: Type[Spider],
1094
- settings: Optional[SettingManager] = None,
1095
- **optimization_kwargs
1096
- ) -> Crawler:
1097
- """
1098
- Create an optimized crawler instance
1099
-
1100
- :param spider_cls: Spider class
1101
- :param settings: Settings manager
1102
- :param optimization_kwargs: Optimization parameters
1103
- :return: Crawler instance
1104
- """
1105
- if settings is None:
1106
- settings = SettingManager()
1107
-
1108
- # Apply optimization configuration
1109
- for key, value in optimization_kwargs.items():
1110
- settings.set(key, value)
1111
-
1112
- context = CrawlerContext()
1113
- return Crawler(spider_cls, settings, context)
1114
-
1115
-
1116
- def create_process_with_large_scale_config(
1117
- config_type: str = 'balanced',
1118
- concurrency: int = 16,
1119
- **kwargs
1120
- ) -> CrawlerProcess:
1121
- """
1122
- Create a process manager that supports large-scale optimization
1123
-
1124
- :param config_type: Configuration type ('conservative', 'balanced', 'aggressive', 'memory_optimized')
1125
- :param concurrency: Concurrency count
1126
- :param kwargs: Other parameters
1127
- :return: Process manager
1128
- """
1129
- try:
1130
- from crawlo.utils.large_scale_config import LargeScaleConfig
1131
-
1132
- # Get optimization configuration
1133
- config_methods = {
1134
- 'conservative': LargeScaleConfig.conservative_config,
1135
- 'balanced': LargeScaleConfig.balanced_config,
1136
- 'aggressive': LargeScaleConfig.aggressive_config,
1137
- 'memory_optimized': LargeScaleConfig.memory_optimized_config
1138
- }
1139
-
1140
- if config_type not in config_methods:
1141
- logger.warning(f"Unknown configuration type: {config_type}, using default configuration")
1142
- settings = SettingManager()
1143
- else:
1144
- config = config_methods[config_type](concurrency)
1145
- settings = SettingManager()
1146
- settings.update(config)
1147
-
1148
- return CrawlerProcess(
1149
- settings=settings,
1150
- max_concurrency=concurrency,
1151
- **kwargs
1152
- )
1153
-
1154
- except ImportError:
1155
- logger.warning("Large-scale configuration module does not exist, using default configuration")
1156
- return CrawlerProcess(max_concurrency=concurrency, **kwargs)
1157
-
1158
-
1159
- # === Exported interfaces ===
1160
-
1161
- __all__ = [
1162
- 'Crawler',
1163
- 'CrawlerProcess',
1164
- 'CrawlerContext',
1165
- 'create_crawler_with_optimizations',
1166
- 'create_process_with_large_scale_config'
1167
- ]
1
+ #!/usr/bin/python
2
+ # -*- coding: UTF-8 -*-
3
+ """
4
+ Refactored Crawler System
5
+ =========================
6
+
7
+ Design principles:
8
+ 1. Single responsibility - each class handles exactly one well-defined concern
9
+ 2. Dependency injection - components are created via factories, making them easy to test
10
+ 3. State management - explicit state transitions and lifecycle
11
+ 4. Error handling - graceful error handling and recovery mechanisms
12
+ """
13
+
14
+ import asyncio
15
+ import time
16
+ from contextlib import asynccontextmanager
17
+ from dataclasses import dataclass
18
+ from enum import Enum
19
+ from typing import Optional, Type, Dict, Any, List
20
+
21
+ from crawlo.factories import get_component_registry
22
+ from crawlo.initialization import initialize_framework, is_framework_ready
23
+ from crawlo.logging import get_logger
24
+
25
+
26
+ class CrawlerState(Enum):
27
+ """Crawler state enum"""
28
+ CREATED = "created"
29
+ INITIALIZING = "initializing"
30
+ READY = "ready"
31
+ RUNNING = "running"
32
+ CLOSING = "closing"
33
+ CLOSED = "closed"
34
+ ERROR = "error"
35
+
36
+
37
+ @dataclass
38
+ class CrawlerMetrics:
39
+ """Crawler performance metrics"""
40
+ start_time: Optional[float] = None
41
+ end_time: Optional[float] = None
42
+ initialization_duration: float = 0.0
43
+ crawl_duration: float = 0.0
44
+ request_count: int = 0
45
+ success_count: int = 0
46
+ error_count: int = 0
47
+
48
+ def get_total_duration(self) -> float:
49
+ if self.start_time and self.end_time:
50
+ return self.end_time - self.start_time
51
+ return 0.0
52
+
53
+ def get_success_rate(self) -> float:
54
+ total = self.success_count + self.error_count
55
+ return (self.success_count / total * 100) if total > 0 else 0.0
56
+
57
+
58
+ class ModernCrawler:
59
+ """
60
+ A modern Crawler implementation
61
+
62
+ Features:
63
+ 1. Clear state management
64
+ 2. Dependency injection
65
+ 3. Component-based architecture
66
+ 4. Robust error handling
67
+ """
68
+
69
+ def __init__(self, spider_cls: Type, settings=None):
70
+ self._spider_cls = spider_cls
71
+ self._settings = settings
72
+ self._state = CrawlerState.CREATED
73
+ self._state_lock = asyncio.Lock()
74
+
75
+ # Components
76
+ self._spider = None
77
+ self._engine = None
78
+ self._stats = None
79
+ self._subscriber = None
80
+ self._extension = None
81
+
82
+ # Metrics
83
+ self._metrics = CrawlerMetrics()
84
+
85
+ # Logger
86
+ self._logger = get_logger(f'crawler.{spider_cls.__name__ if spider_cls else "unknown"}')
87
+
88
+ # Make sure the framework has been initialized
89
+ self._ensure_framework_ready()
90
+
91
+ def _ensure_framework_ready(self):
92
+ """Ensure the framework is ready"""
93
+ if not is_framework_ready():
94
+ try:
95
+ self._settings = initialize_framework(self._settings)
96
+ self._logger.debug("Framework initialized successfully")
97
+ except Exception as e:
98
+ self._logger.warning(f"Framework initialization failed: {e}")
99
+ # Fall back to a degraded strategy
100
+ if not self._settings:
101
+ from crawlo.settings.setting_manager import SettingManager
102
+ self._settings = SettingManager()
103
+
104
+ # Make sure we hold a SettingManager instance
105
+ if isinstance(self._settings, dict):
106
+ from crawlo.settings.setting_manager import SettingManager
107
+ settings_manager = SettingManager()
108
+ settings_manager.update_attributes(self._settings)
109
+ self._settings = settings_manager
110
+
111
+ @property
112
+ def state(self) -> CrawlerState:
113
+ """Return the current state"""
114
+ return self._state
115
+
116
+ @property
117
+ def spider(self):
118
+ """Return the Spider instance"""
119
+ return self._spider
120
+
121
+ @property
122
+ def stats(self):
123
+ """Return the stats collector (backward compatible)"""
124
+ return self._stats
125
+
126
+ @property
127
+ def metrics(self) -> CrawlerMetrics:
128
+ """Return the performance metrics"""
129
+ return self._metrics
130
+
131
+ @property
132
+ def settings(self):
133
+ """Return the settings"""
134
+ return self._settings
135
+
136
+ @property
137
+ def engine(self):
138
+ """Return the Engine instance (backward compatible)"""
139
+ return self._engine
140
+
141
+ @property
142
+ def subscriber(self):
143
+ """Return the Subscriber instance (backward compatible)"""
144
+ return self._subscriber
145
+
146
+ @property
147
+ def extension(self):
148
+ """Return the Extension manager (backward compatible)"""
149
+ return self._extension
150
+
151
+ @extension.setter
152
+ def extension(self, value):
153
+ """Set the Extension manager (backward compatible)"""
154
+ self._extension = value
155
+
156
+     def _create_extension(self):
+         """Create the extension manager (backward compatible)."""
+         if self._extension is None:
+             try:
+                 registry = get_component_registry()
+                 self._extension = registry.create('extension_manager', crawler=self)
+             except Exception as e:
+                 self._logger.warning(f"Failed to create extension manager: {e}")
+         return self._extension
+
+     async def close(self):
+         """Close the crawler (backward compatible)."""
+         await self._cleanup()
+
+     async def crawl(self):
+         """Run the crawl task."""
+         async with self._lifecycle_manager():
+             await self._initialize_components()
+             await self._run_crawler()
+
+     @asynccontextmanager
+     async def _lifecycle_manager(self):
+         """Lifecycle management."""
+         self._metrics.start_time = time.time()
+
+         try:
+             yield
+         except Exception as e:
+             await self._handle_error(e)
+             raise
+         finally:
+             await self._cleanup()
+             self._metrics.end_time = time.time()
+
+     async def _initialize_components(self):
+         """Initialize components."""
+         async with self._state_lock:
+             if self._state != CrawlerState.CREATED:
+                 raise RuntimeError(f"Cannot initialize from state {self._state}")
+
+             self._state = CrawlerState.INITIALIZING
+
+         init_start = time.time()
+
+         try:
+             # Create components via the component factory
+             registry = get_component_registry()
+
+             # Create the Subscriber (no dependencies)
+             self._subscriber = registry.create('subscriber')
+
+             # Create the Spider
+             self._spider = self._create_spider()
+
+             # Create the Engine (requires the crawler argument)
+             self._engine = registry.create('engine', crawler=self)
+
+             # Create Stats (requires the crawler argument)
+             self._stats = registry.create('stats', crawler=self)
+
+             # Create the Extension Manager (optional, requires the crawler argument)
+             try:
+                 self._extension = registry.create('extension_manager', crawler=self)
+             except Exception as e:
+                 self._logger.warning(f"Failed to create extension manager: {e}")
+
+             self._metrics.initialization_duration = time.time() - init_start
+
+             async with self._state_lock:
+                 self._state = CrawlerState.READY
+
+             self._logger.debug(f"Crawler components initialized successfully in {self._metrics.initialization_duration:.2f}s")
+
+         except Exception as e:
+             async with self._state_lock:
+                 self._state = CrawlerState.ERROR
+             raise RuntimeError(f"Component initialization failed: {e}")
+
+     def _create_spider(self):
+         """Create the Spider instance."""
+         if not self._spider_cls:
+             raise ValueError("Spider class not provided")
+
+         # Validate the Spider class
+         if not hasattr(self._spider_cls, 'name'):
+             raise ValueError("Spider class must have 'name' attribute")
+
+         # Instantiate the Spider
+         spider = self._spider_cls()
+
+         # Set the crawler reference
+         if hasattr(spider, 'crawler'):
+             spider.crawler = self
+
+         return spider
+
+     async def _run_crawler(self):
+         """Run the crawler engine."""
+         async with self._state_lock:
+             if self._state != CrawlerState.READY:
+                 raise RuntimeError(f"Cannot run from state {self._state}")
+
+             self._state = CrawlerState.RUNNING
+
+         crawl_start = time.time()
+
+         try:
+             # Start the engine
+             if self._engine:
+                 await self._engine.start_spider(self._spider)
+             else:
+                 raise RuntimeError("Engine not initialized")
+
+             self._metrics.crawl_duration = time.time() - crawl_start
+
+             self._logger.info(f"Crawler completed successfully in {self._metrics.crawl_duration:.2f}s")
+
+         except Exception as e:
+             self._metrics.crawl_duration = time.time() - crawl_start
+             raise RuntimeError(f"Crawler execution failed: {e}")
+
+     async def _handle_error(self, error: Exception):
+         """Handle an error."""
+         async with self._state_lock:
+             self._state = CrawlerState.ERROR
+
+         self._metrics.error_count += 1
+         self._logger.error(f"Crawler error: {error}", exc_info=True)
+
+         # Error-recovery logic could be added here
+
+     async def _cleanup(self):
+         """Clean up resources."""
+         async with self._state_lock:
+             if self._state not in [CrawlerState.CLOSING, CrawlerState.CLOSED]:
+                 self._state = CrawlerState.CLOSING
+
+         try:
+             # Close each component
+             if self._engine and hasattr(self._engine, 'close'):
+                 try:
+                     await self._engine.close()
+                 except Exception as e:
+                     self._logger.warning(f"Engine cleanup failed: {e}")
+
+             # Call the spider's spider_closed hook
+             if self._spider:
+                 try:
+                     if asyncio.iscoroutinefunction(self._spider.spider_closed):
+                         await self._spider.spider_closed()
+                     else:
+                         self._spider.spider_closed()
+                 except Exception as e:
+                     self._logger.warning(f"Spider cleanup failed: {e}")
+
+             if self._stats and hasattr(self._stats, 'close'):
+                 try:
+                     close_result = self._stats.close()
+                     if asyncio.iscoroutine(close_result):
+                         await close_result
+                 except Exception as e:
+                     self._logger.warning(f"Stats cleanup failed: {e}")
+
+             async with self._state_lock:
+                 self._state = CrawlerState.CLOSED
+
+             self._logger.debug("Crawler cleanup completed")
+
+         except Exception as e:
+             self._logger.error(f"Cleanup error: {e}")
+
+
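A minimal usage sketch of the ModernCrawler lifecycle above (illustrative only and not part of the package diff; it assumes a crawlo project in which MySpider is a Spider subclass with a `name` attribute):

    import asyncio

    async def main():
        # MySpider and the settings dict are hypothetical placeholders.
        crawler = ModernCrawler(MySpider, settings={"CONCURRENCY": 8})
        await crawler.crawl()        # initializes components, runs the engine, then cleans up
        print(crawler.state)         # expected: CrawlerState.CLOSED
        print(crawler.metrics.get_total_duration())

    asyncio.run(main())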
+ class CrawlerProcess:
+     """
+     Crawler process manager - manages the execution of multiple crawlers.
+
+     A simplified version focused on core functionality.
+     """
+
+     def __init__(self, settings=None, max_concurrency: int = 3, spider_modules=None):
+         self._settings = settings or initialize_framework()
+         self._max_concurrency = max_concurrency
+         self._crawlers: List[ModernCrawler] = []
+         self._semaphore = asyncio.Semaphore(max_concurrency)
+         self._logger = get_logger('crawler.process')
+         self._spider_modules = spider_modules  # keep spider_modules for later resolution
+
+         # If spider_modules is provided, auto-register the spiders in those modules
+         if spider_modules:
+             self._register_spider_modules(spider_modules)
+
+         # Metrics
+         self._start_time: Optional[float] = None
+         self._end_time: Optional[float] = None
+
+     def _register_spider_modules(self, spider_modules):
+         """Register spider modules."""
+         try:
+             from crawlo.spider import get_global_spider_registry
+             registry = get_global_spider_registry()
+
+             self._logger.debug(f"Registering spider modules: {spider_modules}")
+
+             initial_spider_count = len(registry)
+
+             for module_path in spider_modules:
+                 try:
+                     # Import the module
+                     __import__(module_path)
+                     self._logger.debug(f"Successfully imported spider module: {module_path}")
+                 except ImportError as e:
+                     self._logger.warning(f"Failed to import spider module {module_path}: {e}")
+                     # If the import fails, fall back to auto-discovery
+                     self._auto_discover_spider_modules([module_path])
+
+             # Inspect the spiders now present in the registry
+             spider_names = list(registry.keys())
+             self._logger.debug(f"Registered spiders after import: {spider_names}")
+
+             # If importing the modules registered no new spiders, attempt auto-discovery
+             final_spider_count = len(registry)
+             if final_spider_count == initial_spider_count:
+                 self._logger.debug("No new spiders registered after importing modules, attempting auto-discovery")
+                 self._auto_discover_spider_modules(spider_modules)
+                 spider_names = list(registry.keys())
+                 self._logger.debug(f"Registered spiders after auto-discovery: {spider_names}")
+         except Exception as e:
+             self._logger.warning(f"Error registering spider modules: {e}")
+
+     def _auto_discover_spider_modules(self, spider_modules):
+         """
+         Auto-discover and import all spiders in the given spider modules.
+         Scans every Python file under each module's directory and imports it.
+         """
+         try:
+             from crawlo.spider import get_global_spider_registry
+             import importlib
+             from pathlib import Path
+             import sys
+
+             registry = get_global_spider_registry()
+             initial_spider_count = len(registry)
+
+             for module_path in spider_modules:
+                 try:
+                     # Convert the module path to a filesystem path
+                     # e.g. ofweek_standalone.spiders -> ofweek_standalone/spiders
+                     package_parts = module_path.split('.')
+                     if len(package_parts) < 2:
+                         continue
+
+                     # Locate the project root directory
+                     project_root = None
+                     for path in sys.path:
+                         if path and Path(path).exists():
+                             possible_module_path = Path(path) / package_parts[0]
+                             if possible_module_path.exists():
+                                 project_root = path
+                                 break
+
+                     if not project_root:
+                         # Fall back to the current working directory
+                         project_root = str(Path.cwd())
+
+                     # Build the module directory path
+                     module_dir = Path(project_root)
+                     for part in package_parts:
+                         module_dir = module_dir / part
+
+                     # If the directory exists, scan its Python files
+                     if module_dir.exists() and module_dir.is_dir():
+                         # Import every Python file in the directory (except __init__.py)
+                         for py_file in module_dir.glob("*.py"):
+                             if py_file.name.startswith('_'):
+                                 continue
+
+                             # Build the module name
+                             module_name = py_file.stem  # file name without extension
+                             full_module_path = f"{module_path}.{module_name}"
+
+                             try:
+                                 # Import the module to trigger spider registration
+                                 importlib.import_module(full_module_path)
+                             except ImportError as e:
+                                 self._logger.warning(f"Failed to auto-import spider module {full_module_path}: {e}")
+                 except Exception as e:
+                     self._logger.warning(f"Error during auto-discovery for module {module_path}: {e}")
+
+             # Check whether any new spiders were registered
+             final_spider_count = len(registry)
+             if final_spider_count > initial_spider_count:
+                 new_spiders = list(registry.keys())
+                 self._logger.info(f"Auto-discovered {final_spider_count - initial_spider_count} new spiders: {new_spiders}")
+
+         except Exception as e:
+             self._logger.warning(f"Error during auto-discovery of spider modules: {e}")
+
+     def is_spider_registered(self, name: str) -> bool:
+         """Check whether a spider is registered."""
+         from crawlo.spider import get_global_spider_registry
+         registry = get_global_spider_registry()
+         return name in registry
+
+     def get_spider_class(self, name: str):
+         """Get a spider class by name."""
+         from crawlo.spider import get_global_spider_registry
+         registry = get_global_spider_registry()
+         return registry.get(name)
+
+     def get_spider_names(self):
+         """Get the names of all registered spiders."""
+         from crawlo.spider import get_global_spider_registry
+         registry = get_global_spider_registry()
+         return list(registry.keys())
+
+     async def crawl(self, spider_cls_or_name, settings=None):
+         """Run a single spider."""
+         spider_cls = self._resolve_spider_class(spider_cls_or_name)
+
+         # Log the spider being started (required by the framework's logging conventions)
+         from crawlo.logging import get_logger
+         logger = get_logger('crawlo.framework')
+         logger.info(f"Starting spider: {spider_cls.name}")
+
+         merged_settings = self._merge_settings(settings)
+         crawler = ModernCrawler(spider_cls, merged_settings)
+
+         async with self._semaphore:
+             await crawler.crawl()
+
+         return crawler
+
+     async def crawl_multiple(self, spider_classes_or_names, settings=None):
+         """Run multiple spiders."""
+         self._start_time = time.time()
+
+         try:
+             spider_classes = []
+             for cls_or_name in spider_classes_or_names:
+                 spider_cls = self._resolve_spider_class(cls_or_name)
+                 spider_classes.append(spider_cls)
+
+             # Log the spiders being started (required by the framework's logging conventions)
+             spider_names = [cls.name for cls in spider_classes]
+             from crawlo.logging import get_logger
+             logger = get_logger('crawlo.framework')
+             if len(spider_names) == 1:
+                 logger.info(f"Starting spider: {spider_names[0]}")
+             else:
+                 logger.info(f"Starting spiders: {', '.join(spider_names)}")
+
+             tasks = []
+             for spider_cls in spider_classes:
+                 merged_settings = self._merge_settings(settings)
+                 crawler = ModernCrawler(spider_cls, merged_settings)
+                 self._crawlers.append(crawler)
+
+                 task = asyncio.create_task(self._run_with_semaphore(crawler))
+                 tasks.append(task)
+
+             results = await asyncio.gather(*tasks, return_exceptions=True)
+
+             # Summarize results
+             successful = sum(1 for r in results if not isinstance(r, Exception))
+             failed = len(results) - successful
+
+             self._logger.info(f"Crawl completed: {successful} successful, {failed} failed")
+
+             return results
+
+         finally:
+             self._end_time = time.time()
+             if self._start_time:
+                 duration = self._end_time - self._start_time
+                 self._logger.info(f"Total execution time: {duration:.2f}s")
+
+     async def _run_with_semaphore(self, crawler: ModernCrawler):
+         """Run a crawler under semaphore control."""
+         async with self._semaphore:
+             await crawler.crawl()
+             return crawler
+
+     def _resolve_spider_class(self, spider_cls_or_name):
+         """Resolve a Spider class."""
+         if isinstance(spider_cls_or_name, str):
+             # Look the name up in the registry
+             try:
+                 from crawlo.spider import get_global_spider_registry
+                 registry = get_global_spider_registry()
+                 if spider_cls_or_name in registry:
+                     return registry[spider_cls_or_name]
+                 else:
+                     # Not in the registry: import the spider_modules to trigger registration,
+                     # then check the registry again
+                     if hasattr(self, '_spider_modules') and self._spider_modules:
+                         for module_path in self._spider_modules:
+                             try:
+                                 # Import the module to trigger spider registration
+                                 __import__(module_path)
+                             except ImportError:
+                                 pass  # Ignore import errors
+
+                         # Check the registry again
+                         if spider_cls_or_name in registry:
+                             return registry[spider_cls_or_name]
+
+                     # Still not found: try auto-discovery
+                     if hasattr(self, '_spider_modules') and self._spider_modules:
+                         self._auto_discover_spider_modules(self._spider_modules)
+                         if spider_cls_or_name in registry:
+                             return registry[spider_cls_or_name]
+
+                     # Still not found: try importing the module directly
+                     try:
+                         # Assume the format module.SpiderClass
+                         if '.' in spider_cls_or_name:
+                             module_path, class_name = spider_cls_or_name.rsplit('.', 1)
+                             module = __import__(module_path, fromlist=[class_name])
+                             spider_class = getattr(module, class_name)
+                             # Register in the global registry
+                             registry[spider_class.name] = spider_class
+                             return spider_class
+                         else:
+                             # Try to find it under spider_modules
+                             if hasattr(self, '_spider_modules') and self._spider_modules:
+                                 for module_path in self._spider_modules:
+                                     try:
+                                         # Build the full module path
+                                         full_module_path = f"{module_path}.{spider_cls_or_name}"
+                                         module = __import__(full_module_path, fromlist=[spider_cls_or_name])
+                                         # Find Spider subclasses in the module
+                                         for attr_name in dir(module):
+                                             attr_value = getattr(module, attr_name)
+                                             if (isinstance(attr_value, type) and
+                                                     issubclass(attr_value, registry.__class__.__bases__[0]) and
+                                                     hasattr(attr_value, 'name') and
+                                                     attr_value.name == spider_cls_or_name):
+                                                 # Register in the global registry
+                                                 registry[spider_cls_or_name] = attr_value
+                                                 return attr_value
+                                     except ImportError:
+                                         continue
+                             raise ValueError(f"Spider '{spider_cls_or_name}' not found in registry")
+                     except (ImportError, AttributeError):
+                         raise ValueError(f"Spider '{spider_cls_or_name}' not found in registry")
+             except ImportError:
+                 raise ValueError(f"Cannot resolve spider name '{spider_cls_or_name}'")
+         else:
+             return spider_cls_or_name
+
+     def _merge_settings(self, additional_settings):
+         """Merge settings."""
+         if not additional_settings:
+             return self._settings
+
+         # More sophisticated merge logic could be implemented here
+         from crawlo.settings.setting_manager import SettingManager
+         merged = SettingManager()
+
+         # Copy the base settings
+         if self._settings:
+             merged.update_attributes(self._settings.__dict__)
+
+         # Apply the additional settings
+         merged.update_attributes(additional_settings)
+
+         return merged
+
+     def get_metrics(self) -> Dict[str, Any]:
+         """Get aggregate metrics across all crawlers."""
+         total_duration = 0.0
+         if self._start_time and self._end_time:
+             total_duration = self._end_time - self._start_time
+
+         crawler_metrics = [crawler.metrics for crawler in self._crawlers]
+
+         return {
+             'total_duration': total_duration,
+             'crawler_count': len(self._crawlers),
+             'total_requests': sum(m.request_count for m in crawler_metrics),
+             'total_success': sum(m.success_count for m in crawler_metrics),
+             'total_errors': sum(m.error_count for m in crawler_metrics),
+             'average_success_rate': sum(m.get_success_rate() for m in crawler_metrics) / len(crawler_metrics) if crawler_metrics else 0.0
+         }
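To tie the pieces together, a minimal usage sketch of CrawlerProcess as defined above (illustrative only and not part of the package diff; the "myproject.spiders" module path and the "news"/"prices" spider names are hypothetical placeholders):

    import asyncio

    async def main():
        # spider_modules triggers registration/auto-discovery at construction time.
        process = CrawlerProcess(max_concurrency=2, spider_modules=["myproject.spiders"])

        # Spiders can be passed by registered name or as Spider classes;
        # _resolve_spider_class handles both forms.
        await process.crawl_multiple(["news", "prices"])

        # Aggregate totals and average success rate across all crawlers.
        print(process.get_metrics())

    asyncio.run(main())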