crawlo 1.4.1__py3-none-any.whl → 1.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (326)
  1. crawlo/__init__.py +93 -93
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +75 -75
  4. crawlo/commands/__init__.py +14 -14
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/help.py +138 -138
  8. crawlo/commands/list.py +155 -155
  9. crawlo/commands/run.py +341 -341
  10. crawlo/commands/startproject.py +436 -436
  11. crawlo/commands/stats.py +187 -187
  12. crawlo/commands/utils.py +196 -196
  13. crawlo/config.py +312 -312
  14. crawlo/config_validator.py +277 -277
  15. crawlo/core/__init__.py +52 -52
  16. crawlo/core/engine.py +438 -439
  17. crawlo/core/processor.py +47 -47
  18. crawlo/core/scheduler.py +291 -257
  19. crawlo/crawler.py +650 -650
  20. crawlo/data/__init__.py +5 -5
  21. crawlo/data/user_agents.py +194 -194
  22. crawlo/downloader/__init__.py +273 -273
  23. crawlo/downloader/aiohttp_downloader.py +233 -228
  24. crawlo/downloader/cffi_downloader.py +245 -245
  25. crawlo/downloader/httpx_downloader.py +259 -259
  26. crawlo/downloader/hybrid_downloader.py +212 -212
  27. crawlo/downloader/playwright_downloader.py +402 -402
  28. crawlo/downloader/selenium_downloader.py +472 -472
  29. crawlo/event.py +11 -11
  30. crawlo/exceptions.py +81 -81
  31. crawlo/extension/__init__.py +63 -63
  32. crawlo/extension/health_check.py +141 -141
  33. crawlo/extension/log_interval.py +94 -94
  34. crawlo/extension/log_stats.py +70 -70
  35. crawlo/extension/logging_extension.py +61 -61
  36. crawlo/extension/memory_monitor.py +104 -104
  37. crawlo/extension/performance_profiler.py +133 -133
  38. crawlo/extension/request_recorder.py +107 -107
  39. crawlo/factories/__init__.py +27 -27
  40. crawlo/factories/base.py +68 -68
  41. crawlo/factories/crawler.py +103 -103
  42. crawlo/factories/registry.py +84 -84
  43. crawlo/filters/__init__.py +154 -154
  44. crawlo/filters/aioredis_filter.py +257 -257
  45. crawlo/filters/memory_filter.py +269 -269
  46. crawlo/framework.py +292 -292
  47. crawlo/initialization/__init__.py +44 -44
  48. crawlo/initialization/built_in.py +425 -425
  49. crawlo/initialization/context.py +141 -141
  50. crawlo/initialization/core.py +193 -193
  51. crawlo/initialization/phases.py +148 -148
  52. crawlo/initialization/registry.py +145 -145
  53. crawlo/items/__init__.py +23 -23
  54. crawlo/items/base.py +23 -23
  55. crawlo/items/fields.py +52 -52
  56. crawlo/items/items.py +104 -104
  57. crawlo/logging/__init__.py +45 -37
  58. crawlo/logging/async_handler.py +181 -0
  59. crawlo/logging/config.py +196 -96
  60. crawlo/logging/factory.py +171 -128
  61. crawlo/logging/manager.py +111 -111
  62. crawlo/logging/monitor.py +153 -0
  63. crawlo/logging/sampler.py +167 -0
  64. crawlo/middleware/__init__.py +21 -21
  65. crawlo/middleware/default_header.py +132 -132
  66. crawlo/middleware/download_delay.py +104 -104
  67. crawlo/middleware/middleware_manager.py +135 -135
  68. crawlo/middleware/offsite.py +123 -123
  69. crawlo/middleware/proxy.py +386 -386
  70. crawlo/middleware/request_ignore.py +86 -86
  71. crawlo/middleware/response_code.py +150 -150
  72. crawlo/middleware/response_filter.py +136 -136
  73. crawlo/middleware/retry.py +124 -124
  74. crawlo/middleware/simple_proxy.py +65 -65
  75. crawlo/mode_manager.py +219 -219
  76. crawlo/network/__init__.py +21 -21
  77. crawlo/network/request.py +379 -379
  78. crawlo/network/response.py +359 -359
  79. crawlo/pipelines/__init__.py +21 -21
  80. crawlo/pipelines/bloom_dedup_pipeline.py +146 -146
  81. crawlo/pipelines/console_pipeline.py +39 -39
  82. crawlo/pipelines/csv_pipeline.py +316 -316
  83. crawlo/pipelines/database_dedup_pipeline.py +197 -197
  84. crawlo/pipelines/json_pipeline.py +218 -218
  85. crawlo/pipelines/memory_dedup_pipeline.py +105 -105
  86. crawlo/pipelines/mongo_pipeline.py +131 -131
  87. crawlo/pipelines/mysql_pipeline.py +325 -325
  88. crawlo/pipelines/pipeline_manager.py +100 -84
  89. crawlo/pipelines/redis_dedup_pipeline.py +156 -156
  90. crawlo/project.py +349 -338
  91. crawlo/queue/pqueue.py +42 -42
  92. crawlo/queue/queue_manager.py +526 -522
  93. crawlo/queue/redis_priority_queue.py +370 -367
  94. crawlo/settings/__init__.py +7 -7
  95. crawlo/settings/default_settings.py +284 -284
  96. crawlo/settings/setting_manager.py +219 -219
  97. crawlo/spider/__init__.py +657 -657
  98. crawlo/stats_collector.py +73 -73
  99. crawlo/subscriber.py +129 -129
  100. crawlo/task_manager.py +138 -138
  101. crawlo/templates/crawlo.cfg.tmpl +10 -10
  102. crawlo/templates/project/__init__.py.tmpl +3 -3
  103. crawlo/templates/project/items.py.tmpl +17 -17
  104. crawlo/templates/project/middlewares.py.tmpl +118 -118
  105. crawlo/templates/project/pipelines.py.tmpl +96 -96
  106. crawlo/templates/project/settings.py.tmpl +170 -170
  107. crawlo/templates/project/settings_distributed.py.tmpl +169 -169
  108. crawlo/templates/project/settings_gentle.py.tmpl +166 -166
  109. crawlo/templates/project/settings_high_performance.py.tmpl +167 -167
  110. crawlo/templates/project/settings_minimal.py.tmpl +65 -65
  111. crawlo/templates/project/settings_simple.py.tmpl +164 -164
  112. crawlo/templates/project/spiders/__init__.py.tmpl +9 -9
  113. crawlo/templates/run.py.tmpl +34 -34
  114. crawlo/templates/spider/spider.py.tmpl +143 -143
  115. crawlo/templates/spiders_init.py.tmpl +9 -9
  116. crawlo/tools/__init__.py +200 -200
  117. crawlo/tools/anti_crawler.py +268 -268
  118. crawlo/tools/authenticated_proxy.py +240 -240
  119. crawlo/tools/data_formatter.py +225 -225
  120. crawlo/tools/data_validator.py +180 -180
  121. crawlo/tools/date_tools.py +289 -289
  122. crawlo/tools/distributed_coordinator.py +384 -384
  123. crawlo/tools/encoding_converter.py +127 -127
  124. crawlo/tools/network_diagnostic.py +364 -364
  125. crawlo/tools/request_tools.py +82 -82
  126. crawlo/tools/retry_mechanism.py +224 -224
  127. crawlo/tools/scenario_adapter.py +262 -262
  128. crawlo/tools/text_cleaner.py +232 -232
  129. crawlo/utils/__init__.py +34 -34
  130. crawlo/utils/batch_processor.py +259 -259
  131. crawlo/utils/class_loader.py +25 -25
  132. crawlo/utils/controlled_spider_mixin.py +439 -439
  133. crawlo/utils/db_helper.py +343 -343
  134. crawlo/utils/enhanced_error_handler.py +356 -356
  135. crawlo/utils/env_config.py +142 -142
  136. crawlo/utils/error_handler.py +165 -165
  137. crawlo/utils/fingerprint.py +122 -122
  138. crawlo/utils/func_tools.py +82 -82
  139. crawlo/utils/large_scale_config.py +286 -286
  140. crawlo/utils/large_scale_helper.py +344 -344
  141. crawlo/utils/log.py +79 -79
  142. crawlo/utils/performance_monitor.py +285 -285
  143. crawlo/utils/queue_helper.py +175 -175
  144. crawlo/utils/redis_connection_pool.py +388 -388
  145. crawlo/utils/redis_key_validator.py +198 -198
  146. crawlo/utils/request.py +267 -267
  147. crawlo/utils/request_serializer.py +225 -225
  148. crawlo/utils/spider_loader.py +61 -61
  149. crawlo/utils/system.py +11 -11
  150. crawlo/utils/tools.py +4 -4
  151. crawlo/utils/url.py +39 -39
  152. crawlo-1.4.3.dist-info/METADATA +190 -0
  153. crawlo-1.4.3.dist-info/RECORD +326 -0
  154. examples/__init__.py +7 -7
  155. examples/test_project/__init__.py +7 -7
  156. examples/test_project/run.py +34 -34
  157. examples/test_project/test_project/__init__.py +3 -3
  158. examples/test_project/test_project/items.py +17 -17
  159. examples/test_project/test_project/middlewares.py +118 -118
  160. examples/test_project/test_project/pipelines.py +96 -96
  161. examples/test_project/test_project/settings.py +169 -169
  162. examples/test_project/test_project/spiders/__init__.py +9 -9
  163. examples/test_project/test_project/spiders/of_week_dis.py +143 -143
  164. tests/__init__.py +7 -7
  165. tests/advanced_tools_example.py +275 -275
  166. tests/authenticated_proxy_example.py +106 -106
  167. tests/baidu_performance_test.py +108 -108
  168. tests/baidu_test.py +59 -59
  169. tests/cleaners_example.py +160 -160
  170. tests/comprehensive_framework_test.py +212 -212
  171. tests/comprehensive_test.py +81 -81
  172. tests/comprehensive_testing_summary.md +186 -186
  173. tests/config_validation_demo.py +142 -142
  174. tests/controlled_spider_example.py +205 -205
  175. tests/date_tools_example.py +180 -180
  176. tests/debug_configure.py +69 -69
  177. tests/debug_framework_logger.py +84 -84
  178. tests/debug_log_config.py +126 -126
  179. tests/debug_log_levels.py +63 -63
  180. tests/debug_pipelines.py +66 -66
  181. tests/detailed_log_test.py +233 -233
  182. tests/distributed_test.py +66 -66
  183. tests/distributed_test_debug.py +76 -76
  184. tests/dynamic_loading_example.py +523 -523
  185. tests/dynamic_loading_test.py +104 -104
  186. tests/env_config_example.py +133 -133
  187. tests/error_handling_example.py +171 -171
  188. tests/final_comprehensive_test.py +151 -151
  189. tests/final_log_test.py +260 -260
  190. tests/final_validation_test.py +182 -182
  191. tests/fix_log_test.py +142 -142
  192. tests/framework_performance_test.py +202 -202
  193. tests/log_buffering_test.py +111 -111
  194. tests/log_generation_timing_test.py +153 -153
  195. tests/optimized_performance_test.py +211 -211
  196. tests/performance_comparison.py +245 -245
  197. tests/queue_blocking_test.py +113 -113
  198. tests/queue_test.py +89 -89
  199. tests/redis_key_validation_demo.py +130 -130
  200. tests/request_params_example.py +150 -150
  201. tests/response_improvements_example.py +144 -144
  202. tests/scrapy_comparison/ofweek_scrapy.py +138 -138
  203. tests/scrapy_comparison/scrapy_test.py +133 -133
  204. tests/simple_command_test.py +119 -119
  205. tests/simple_crawlo_test.py +127 -127
  206. tests/simple_log_test.py +57 -57
  207. tests/simple_log_test2.py +137 -137
  208. tests/simple_optimization_test.py +128 -128
  209. tests/simple_queue_type_test.py +41 -41
  210. tests/simple_spider_test.py +49 -49
  211. tests/simple_test.py +47 -47
  212. tests/spider_log_timing_test.py +177 -177
  213. tests/test_advanced_tools.py +148 -148
  214. tests/test_all_commands.py +230 -230
  215. tests/test_all_pipeline_fingerprints.py +133 -133
  216. tests/test_all_redis_key_configs.py +145 -145
  217. tests/test_authenticated_proxy.py +141 -141
  218. tests/test_batch_processor.py +178 -178
  219. tests/test_cleaners.py +54 -54
  220. tests/test_component_factory.py +174 -174
  221. tests/test_comprehensive.py +146 -146
  222. tests/test_config_consistency.py +80 -80
  223. tests/test_config_merge.py +152 -152
  224. tests/test_config_validator.py +182 -182
  225. tests/test_controlled_spider_mixin.py +79 -79
  226. tests/test_crawlo_proxy_integration.py +108 -108
  227. tests/test_date_tools.py +123 -123
  228. tests/test_dedup_fix.py +220 -220
  229. tests/test_dedup_pipeline_consistency.py +125 -0
  230. tests/test_default_header_middleware.py +313 -313
  231. tests/test_distributed.py +65 -65
  232. tests/test_double_crawlo_fix.py +204 -204
  233. tests/test_double_crawlo_fix_simple.py +124 -124
  234. tests/test_download_delay_middleware.py +221 -221
  235. tests/test_downloader_proxy_compatibility.py +268 -268
  236. tests/test_dynamic_downloaders_proxy.py +124 -124
  237. tests/test_dynamic_proxy.py +92 -92
  238. tests/test_dynamic_proxy_config.py +146 -146
  239. tests/test_dynamic_proxy_real.py +109 -109
  240. tests/test_edge_cases.py +303 -303
  241. tests/test_enhanced_error_handler.py +270 -270
  242. tests/test_enhanced_error_handler_comprehensive.py +245 -245
  243. tests/test_env_config.py +121 -121
  244. tests/test_error_handler_compatibility.py +112 -112
  245. tests/test_factories.py +252 -252
  246. tests/test_final_validation.py +153 -153
  247. tests/test_fingerprint_consistency.py +135 -135
  248. tests/test_fingerprint_simple.py +51 -51
  249. tests/test_framework_env_usage.py +103 -103
  250. tests/test_framework_logger.py +66 -66
  251. tests/test_framework_startup.py +64 -64
  252. tests/test_get_component_logger.py +83 -83
  253. tests/test_hash_performance.py +99 -99
  254. tests/test_integration.py +169 -169
  255. tests/test_item_dedup_redis_key.py +122 -122
  256. tests/test_large_scale_config.py +112 -112
  257. tests/test_large_scale_helper.py +235 -235
  258. tests/test_logging_enhancements.py +375 -0
  259. tests/test_logging_final.py +185 -0
  260. tests/test_logging_integration.py +313 -0
  261. tests/test_logging_system.py +282 -282
  262. tests/test_middleware_debug.py +142 -0
  263. tests/test_mode_change.py +72 -72
  264. tests/test_mode_consistency.py +51 -51
  265. tests/test_offsite_middleware.py +244 -244
  266. tests/test_offsite_middleware_simple.py +203 -203
  267. tests/test_parsel.py +29 -29
  268. tests/test_performance.py +327 -327
  269. tests/test_performance_monitor.py +115 -115
  270. tests/test_pipeline_fingerprint_consistency.py +86 -86
  271. tests/test_priority_behavior.py +212 -0
  272. tests/test_priority_consistency.py +152 -0
  273. tests/test_priority_consistency_fixed.py +250 -0
  274. tests/test_proxy_api.py +264 -264
  275. tests/test_proxy_health_check.py +32 -32
  276. tests/test_proxy_middleware.py +121 -121
  277. tests/test_proxy_middleware_enhanced.py +216 -216
  278. tests/test_proxy_middleware_integration.py +136 -136
  279. tests/test_proxy_middleware_refactored.py +184 -184
  280. tests/test_proxy_providers.py +56 -56
  281. tests/test_proxy_stats.py +19 -19
  282. tests/test_proxy_strategies.py +59 -59
  283. tests/test_queue_empty_check.py +41 -41
  284. tests/test_queue_manager_double_crawlo.py +173 -173
  285. tests/test_queue_manager_redis_key.py +179 -179
  286. tests/test_queue_naming.py +154 -154
  287. tests/test_queue_type.py +106 -106
  288. tests/test_queue_type_redis_config_consistency.py +131 -0
  289. tests/test_random_headers_default.py +323 -0
  290. tests/test_random_headers_necessity.py +309 -0
  291. tests/test_random_user_agent.py +72 -72
  292. tests/test_real_scenario_proxy.py +195 -195
  293. tests/test_redis_config.py +28 -28
  294. tests/test_redis_connection_pool.py +294 -294
  295. tests/test_redis_key_naming.py +181 -181
  296. tests/test_redis_key_validator.py +123 -123
  297. tests/test_redis_queue.py +224 -224
  298. tests/test_redis_queue_name_fix.py +175 -175
  299. tests/test_redis_queue_type_fallback.py +130 -0
  300. tests/test_request_ignore_middleware.py +182 -182
  301. tests/test_request_params.py +111 -111
  302. tests/test_request_serialization.py +70 -70
  303. tests/test_response_code_middleware.py +349 -349
  304. tests/test_response_filter_middleware.py +427 -427
  305. tests/test_response_improvements.py +152 -152
  306. tests/test_retry_middleware.py +334 -242
  307. tests/test_retry_middleware_realistic.py +274 -0
  308. tests/test_scheduler.py +252 -252
  309. tests/test_scheduler_config_update.py +133 -133
  310. tests/test_simple_response.py +61 -61
  311. tests/test_telecom_spider_redis_key.py +205 -205
  312. tests/test_template_content.py +87 -87
  313. tests/test_template_redis_key.py +134 -134
  314. tests/test_tools.py +159 -159
  315. tests/test_user_agent_randomness.py +177 -0
  316. tests/test_user_agents.py +96 -96
  317. tests/tools_example.py +260 -260
  318. tests/untested_features_report.md +138 -138
  319. tests/verify_debug.py +51 -51
  320. tests/verify_distributed.py +117 -117
  321. tests/verify_log_fix.py +111 -111
  322. crawlo-1.4.1.dist-info/METADATA +0 -1199
  323. crawlo-1.4.1.dist-info/RECORD +0 -309
  324. {crawlo-1.4.1.dist-info → crawlo-1.4.3.dist-info}/WHEEL +0 -0
  325. {crawlo-1.4.1.dist-info → crawlo-1.4.3.dist-info}/entry_points.txt +0 -0
  326. {crawlo-1.4.1.dist-info → crawlo-1.4.3.dist-info}/top_level.txt +0 -0
crawlo/crawler.py CHANGED
@@ -1,651 +1,651 @@
(650 lines removed, 650 added; the removed and added content renders identically, so the file is listed once below.)
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Refactored Crawler system
==================

Design principles:
1. Single responsibility - each class has one clearly defined job
2. Dependency injection - components are created via factories, which eases testing
3. State management - explicit state transitions and lifecycle
4. Error handling - graceful error handling and recovery
"""

import asyncio
import time
from contextlib import asynccontextmanager
from dataclasses import dataclass
from enum import Enum
from typing import Optional, Type, Dict, Any, List

from crawlo.factories import get_component_registry
from crawlo.initialization import initialize_framework, is_framework_ready
from crawlo.logging import get_logger


class CrawlerState(Enum):
    """Crawler state enum"""
    CREATED = "created"
    INITIALIZING = "initializing"
    READY = "ready"
    RUNNING = "running"
    CLOSING = "closing"
    CLOSED = "closed"
    ERROR = "error"


@dataclass
class CrawlerMetrics:
    """Crawler performance metrics"""
    start_time: Optional[float] = None
    end_time: Optional[float] = None
    initialization_duration: float = 0.0
    crawl_duration: float = 0.0
    request_count: int = 0
    success_count: int = 0
    error_count: int = 0

    def get_total_duration(self) -> float:
        if self.start_time and self.end_time:
            return self.end_time - self.start_time
        return 0.0

    def get_success_rate(self) -> float:
        total = self.success_count + self.error_count
        return (self.success_count / total * 100) if total > 0 else 0.0


class ModernCrawler:
    """
    Modern Crawler implementation

    Features:
    1. Explicit state management
    2. Dependency injection
    3. Component-based architecture
    4. Robust error handling
    """

    def __init__(self, spider_cls: Type, settings=None):
        self._spider_cls = spider_cls
        self._settings = settings
        self._state = CrawlerState.CREATED
        self._state_lock = asyncio.Lock()

        # Components
        self._spider = None
        self._engine = None
        self._stats = None
        self._subscriber = None
        self._extension = None

        # Metrics
        self._metrics = CrawlerMetrics()

        # Logging
        self._logger = get_logger(f'crawler.{spider_cls.__name__ if spider_cls else "unknown"}')

        # Make sure the framework is initialized
        self._ensure_framework_ready()

    def _ensure_framework_ready(self):
        """Ensure the framework is ready"""
        if not is_framework_ready():
            try:
                self._settings = initialize_framework(self._settings)
                self._logger.debug("Framework initialized successfully")
            except Exception as e:
                self._logger.warning(f"Framework initialization failed: {e}")
                # Fall back to a degraded strategy
                if not self._settings:
                    from crawlo.settings.setting_manager import SettingManager
                    self._settings = SettingManager()

        # Make sure we hold a SettingManager instance
        if isinstance(self._settings, dict):
            from crawlo.settings.setting_manager import SettingManager
            settings_manager = SettingManager()
            settings_manager.update_attributes(self._settings)
            self._settings = settings_manager

    @property
    def state(self) -> CrawlerState:
        """Return the current state"""
        return self._state

    @property
    def spider(self):
        """Return the Spider instance"""
        return self._spider

    @property
    def stats(self):
        """Return the Stats instance (backward compatible)"""
        return self._stats

    @property
    def metrics(self) -> CrawlerMetrics:
        """Return the performance metrics"""
        return self._metrics

    @property
    def settings(self):
        """Return the settings"""
        return self._settings

    @property
    def engine(self):
        """Return the Engine instance (backward compatible)"""
        return self._engine

    @property
    def subscriber(self):
        """Return the Subscriber instance (backward compatible)"""
        return self._subscriber

    @property
    def extension(self):
        """Return the Extension instance (backward compatible)"""
        return self._extension

    @extension.setter
    def extension(self, value):
        """Set the Extension instance (backward compatible)"""
        self._extension = value

    def _create_extension(self):
        """Create the Extension manager (backward compatible)"""
        if self._extension is None:
            try:
                registry = get_component_registry()
                self._extension = registry.create('extension_manager', crawler=self)
            except Exception as e:
                self._logger.warning(f"Failed to create extension manager: {e}")
        return self._extension

    async def close(self):
        """Close the crawler (backward compatible)"""
        await self._cleanup()

    async def crawl(self):
        """Run the crawl job"""
        async with self._lifecycle_manager():
            await self._initialize_components()
            await self._run_crawler()

    @asynccontextmanager
    async def _lifecycle_manager(self):
        """Lifecycle management"""
        self._metrics.start_time = time.time()

        try:
            yield
        except Exception as e:
            await self._handle_error(e)
            raise
        finally:
            await self._cleanup()
            self._metrics.end_time = time.time()

    async def _initialize_components(self):
        """Initialize components"""
        async with self._state_lock:
            if self._state != CrawlerState.CREATED:
                raise RuntimeError(f"Cannot initialize from state {self._state}")

            self._state = CrawlerState.INITIALIZING

        init_start = time.time()

        try:
            # Create components through the component factory
            registry = get_component_registry()

            # Create the Subscriber (no dependencies)
            self._subscriber = registry.create('subscriber')

            # Create the Spider
            self._spider = self._create_spider()

            # Create the Engine (requires the crawler argument)
            self._engine = registry.create('engine', crawler=self)

            # Create Stats (requires the crawler argument)
            self._stats = registry.create('stats', crawler=self)

            # Create the Extension Manager (optional, requires the crawler argument)
            try:
                self._extension = registry.create('extension_manager', crawler=self)
            except Exception as e:
                self._logger.warning(f"Failed to create extension manager: {e}")

            self._metrics.initialization_duration = time.time() - init_start

            async with self._state_lock:
                self._state = CrawlerState.READY

            self._logger.debug(f"Crawler components initialized successfully in {self._metrics.initialization_duration:.2f}s")

        except Exception as e:
            async with self._state_lock:
                self._state = CrawlerState.ERROR
            raise RuntimeError(f"Component initialization failed: {e}")

    def _create_spider(self):
        """Create the Spider instance"""
        if not self._spider_cls:
            raise ValueError("Spider class not provided")

        # Validate the Spider class
        if not hasattr(self._spider_cls, 'name'):
            raise ValueError("Spider class must have 'name' attribute")

        # Instantiate the Spider
        spider = self._spider_cls()

        # Set the crawler reference
        if hasattr(spider, 'crawler'):
            spider.crawler = self

        return spider

    async def _run_crawler(self):
        """Run the crawler engine"""
        async with self._state_lock:
            if self._state != CrawlerState.READY:
                raise RuntimeError(f"Cannot run from state {self._state}")

            self._state = CrawlerState.RUNNING

        crawl_start = time.time()

        try:
            # Start the engine
            if self._engine:
                await self._engine.start_spider(self._spider)
            else:
                raise RuntimeError("Engine not initialized")

            self._metrics.crawl_duration = time.time() - crawl_start

            self._logger.info(f"Crawler completed successfully in {self._metrics.crawl_duration:.2f}s")

        except Exception as e:
            self._metrics.crawl_duration = time.time() - crawl_start
            raise RuntimeError(f"Crawler execution failed: {e}")

    async def _handle_error(self, error: Exception):
        """Handle an error"""
        async with self._state_lock:
            self._state = CrawlerState.ERROR

        self._metrics.error_count += 1
        self._logger.error(f"Crawler error: {error}", exc_info=True)

        # Error-recovery logic could be added here

    async def _cleanup(self):
        """Clean up resources"""
        async with self._state_lock:
            if self._state not in [CrawlerState.CLOSING, CrawlerState.CLOSED]:
                self._state = CrawlerState.CLOSING

        try:
            # Close each component
            if self._engine and hasattr(self._engine, 'close'):
                try:
                    await self._engine.close()
                except Exception as e:
                    self._logger.warning(f"Engine cleanup failed: {e}")

            # Call the Spider's spider_closed method
            if self._spider:
                try:
                    if asyncio.iscoroutinefunction(self._spider.spider_closed):
                        await self._spider.spider_closed()
                    else:
                        self._spider.spider_closed()
                except Exception as e:
                    self._logger.warning(f"Spider cleanup failed: {e}")

            # Call StatsCollector.close_spider, passing reason and spider_name
            if self._stats and hasattr(self._stats, 'close_spider'):
                try:
                    # Use the default 'finished' as the reason
                    self._stats.close_spider(self._spider, reason='finished')
                except Exception as e:
                    self._logger.warning(f"Stats close_spider failed: {e}")

            # Fire the spider_closed event to notify all subscribers (including extensions),
            # passing the reason argument; the default 'finished' is used here
            await self.subscriber.notify("spider_closed", reason='finished')

            if self._stats and hasattr(self._stats, 'close'):
                try:
                    close_result = self._stats.close()
                    if asyncio.iscoroutine(close_result):
                        await close_result
                except Exception as e:
                    self._logger.warning(f"Stats cleanup failed: {e}")

            async with self._state_lock:
                self._state = CrawlerState.CLOSED

            self._logger.debug("Crawler cleanup completed")

        except Exception as e:
            self._logger.error(f"Cleanup error: {e}")


class CrawlerProcess:
    """
    Crawler process manager - manages the execution of multiple Crawlers

    Simplified version focused on core functionality
    """

    def __init__(self, settings=None, max_concurrency: int = 3, spider_modules=None):
        self._settings = settings or initialize_framework()
        self._max_concurrency = max_concurrency
        self._crawlers: List[ModernCrawler] = []
        self._semaphore = asyncio.Semaphore(max_concurrency)
        self._logger = get_logger('crawler.process')
        self._spider_modules = spider_modules  # keep a reference to spider_modules

        # If spider_modules is provided, auto-register the spiders in those modules
        if spider_modules:
            self._register_spider_modules(spider_modules)

        # Metrics
        self._start_time: Optional[float] = None
        self._end_time: Optional[float] = None

    def _register_spider_modules(self, spider_modules):
        """Register spider modules"""
        try:
            from crawlo.spider import get_global_spider_registry
            registry = get_global_spider_registry()

            self._logger.debug(f"Registering spider modules: {spider_modules}")

            initial_spider_count = len(registry)

            for module_path in spider_modules:
                try:
                    # Import the module
                    __import__(module_path)
                    self._logger.debug(f"Successfully imported spider module: {module_path}")
                except ImportError as e:
                    self._logger.warning(f"Failed to import spider module {module_path}: {e}")
                    # If the import fails, try auto-discovery
                    self._auto_discover_spider_modules([module_path])

            # Inspect the spiders now in the registry
            spider_names = list(registry.keys())
            self._logger.debug(f"Registered spiders after import: {spider_names}")

            # If importing the modules registered no new spiders, try auto-discovery
            final_spider_count = len(registry)
            if final_spider_count == initial_spider_count:
                self._logger.debug("No new spiders registered after importing modules, attempting auto-discovery")
                self._auto_discover_spider_modules(spider_modules)
                spider_names = list(registry.keys())
                self._logger.debug(f"Registered spiders after auto-discovery: {spider_names}")
        except Exception as e:
            self._logger.warning(f"Error registering spider modules: {e}")

    def _auto_discover_spider_modules(self, spider_modules):
        """
        Auto-discover and import every spider in the given spider modules.
        Scans all Python files under each module directory and imports them automatically.
        """
        try:
            from crawlo.spider import get_global_spider_registry
            import importlib
            from pathlib import Path
            import sys

            registry = get_global_spider_registry()
            initial_spider_count = len(registry)

            for module_path in spider_modules:
                try:
                    # Convert the module path to a filesystem path,
                    # e.g. ofweek_standalone.spiders -> ofweek_standalone/spiders
                    package_parts = module_path.split('.')
                    if len(package_parts) < 2:
                        continue

                    # Find the project root directory
                    project_root = None
                    for path in sys.path:
                        if path and Path(path).exists():
                            possible_module_path = Path(path) / package_parts[0]
                            if possible_module_path.exists():
                                project_root = path
                                break

                    if not project_root:
                        # Fall back to the current working directory
                        project_root = str(Path.cwd())

                    # Build the module directory path
                    module_dir = Path(project_root)
                    for part in package_parts:
                        module_dir = module_dir / part

                    # If the directory exists, scan its Python files
                    if module_dir.exists() and module_dir.is_dir():
                        # Import every Python file in the directory (except __init__.py)
                        for py_file in module_dir.glob("*.py"):
                            if py_file.name.startswith('_'):
                                continue

                            # Build the module name
                            module_name = py_file.stem  # file name without the extension
                            full_module_path = f"{module_path}.{module_name}"

                            try:
                                # Import the module to trigger Spider registration
                                importlib.import_module(full_module_path)
                            except ImportError as e:
                                self._logger.warning(f"Failed to auto-import spider module {full_module_path}: {e}")
                except Exception as e:
                    self._logger.warning(f"Error during auto-discovery for module {module_path}: {e}")

            # Check whether any new spiders were registered
            final_spider_count = len(registry)
            if final_spider_count > initial_spider_count:
                new_spiders = list(registry.keys())
                self._logger.info(f"Auto-discovered {final_spider_count - initial_spider_count} new spiders: {new_spiders}")

        except Exception as e:
            self._logger.warning(f"Error during auto-discovery of spider modules: {e}")

    def is_spider_registered(self, name: str) -> bool:
        """Check whether a spider is registered"""
        from crawlo.spider import get_global_spider_registry
        registry = get_global_spider_registry()
        return name in registry

    def get_spider_class(self, name: str):
        """Get a spider class by name"""
        from crawlo.spider import get_global_spider_registry
        registry = get_global_spider_registry()
        return registry.get(name)

    def get_spider_names(self):
        """Get the names of all registered spiders"""
        from crawlo.spider import get_global_spider_registry
        registry = get_global_spider_registry()
        return list(registry.keys())

    async def crawl(self, spider_cls_or_name, settings=None):
        """Run a single spider"""
        spider_cls = self._resolve_spider_class(spider_cls_or_name)

        # Log the name of the spider being started (required by the logging conventions)
        from crawlo.logging import get_logger
        logger = get_logger('crawlo.framework')
        logger.info(f"Starting spider: {spider_cls.name}")

        merged_settings = self._merge_settings(settings)
        crawler = ModernCrawler(spider_cls, merged_settings)

        async with self._semaphore:
            await crawler.crawl()

        return crawler

    async def crawl_multiple(self, spider_classes_or_names, settings=None):
        """Run multiple spiders"""
        self._start_time = time.time()

        try:
            spider_classes = []
            for cls_or_name in spider_classes_or_names:
                spider_cls = self._resolve_spider_class(cls_or_name)
                spider_classes.append(spider_cls)

            # Log the names of the spiders being started (required by the logging conventions)
            spider_names = [cls.name for cls in spider_classes]
            from crawlo.logging import get_logger
            logger = get_logger('crawlo.framework')
            if len(spider_names) == 1:
                logger.info(f"Starting spider: {spider_names[0]}")
            else:
                logger.info(f"Starting spiders: {', '.join(spider_names)}")

            tasks = []
            for spider_cls in spider_classes:
                merged_settings = self._merge_settings(settings)
                crawler = ModernCrawler(spider_cls, merged_settings)
                self._crawlers.append(crawler)

                task = asyncio.create_task(self._run_with_semaphore(crawler))
                tasks.append(task)

            results = await asyncio.gather(*tasks, return_exceptions=True)

            # Summarize the results
            successful = sum(1 for r in results if not isinstance(r, Exception))
            failed = len(results) - successful

            self._logger.info(f"Crawl completed: {successful} successful, {failed} failed")

            return results

        finally:
            self._end_time = time.time()
            if self._start_time:
                duration = self._end_time - self._start_time
                self._logger.info(f"Total execution time: {duration:.2f}s")

    async def _run_with_semaphore(self, crawler: ModernCrawler):
        """Run a crawler under the concurrency semaphore"""
        async with self._semaphore:
            await crawler.crawl()
        return crawler

    def _resolve_spider_class(self, spider_cls_or_name):
        """Resolve a Spider class"""
        if isinstance(spider_cls_or_name, str):
            # Look it up in the registry
            try:
                from crawlo.spider import get_global_spider_registry
                registry = get_global_spider_registry()
                if spider_cls_or_name in registry:
                    return registry[spider_cls_or_name]
                else:
                    # If it is not in the registry, import the spider_modules to trigger
                    # registration, then check the registry again
                    if hasattr(self, '_spider_modules') and self._spider_modules:
                        for module_path in self._spider_modules:
                            try:
                                # Import the module to trigger spider registration
                                __import__(module_path)
                            except ImportError:
                                pass  # ignore import errors

                    # Check the registry again
                    if spider_cls_or_name in registry:
                        return registry[spider_cls_or_name]

                    # If it is still not found, try auto-discovery
                    if hasattr(self, '_spider_modules') and self._spider_modules:
                        self._auto_discover_spider_modules(self._spider_modules)
                        if spider_cls_or_name in registry:
                            return registry[spider_cls_or_name]

                    # If it is still not found, try importing the module directly
                    try:
                        # Assume the format module.SpiderClass
                        if '.' in spider_cls_or_name:
                            module_path, class_name = spider_cls_or_name.rsplit('.', 1)
                            module = __import__(module_path, fromlist=[class_name])
                            spider_class = getattr(module, class_name)
                            # Register it in the global registry
                            registry[spider_class.name] = spider_class
                            return spider_class
                        else:
                            # Try to find it under spider_modules
                            if hasattr(self, '_spider_modules') and self._spider_modules:
                                for module_path in self._spider_modules:
                                    try:
                                        # Build the full module path
                                        full_module_path = f"{module_path}.{spider_cls_or_name}"
                                        module = __import__(full_module_path, fromlist=[spider_cls_or_name])
                                        # Find the Spider subclass in the module
                                        for attr_name in dir(module):
                                            attr_value = getattr(module, attr_name)
                                            if (isinstance(attr_value, type) and
                                                    issubclass(attr_value, registry.__class__.__bases__[0]) and
                                                    hasattr(attr_value, 'name') and
                                                    attr_value.name == spider_cls_or_name):
                                                # Register it in the global registry
                                                registry[spider_cls_or_name] = attr_value
                                                return attr_value
                                    except ImportError:
                                        continue
                            raise ValueError(f"Spider '{spider_cls_or_name}' not found in registry")
                    except (ImportError, AttributeError):
                        raise ValueError(f"Spider '{spider_cls_or_name}' not found in registry")
            except ImportError:
                raise ValueError(f"Cannot resolve spider name '{spider_cls_or_name}'")
        else:
            return spider_cls_or_name

    def _merge_settings(self, additional_settings):
        """Merge settings"""
        if not additional_settings:
            return self._settings

        # More sophisticated merge logic could be implemented here
        from crawlo.settings.setting_manager import SettingManager
        merged = SettingManager()

        # Copy the base settings
        if self._settings:
            merged.update_attributes(self._settings.__dict__)

        # Apply the additional settings
        merged.update_attributes(additional_settings)

        return merged

    def get_metrics(self) -> Dict[str, Any]:
        """Get overall metrics"""
        total_duration = 0.0
        if self._start_time and self._end_time:
            total_duration = self._end_time - self._start_time

        crawler_metrics = [crawler.metrics for crawler in self._crawlers]

        return {
            'total_duration': total_duration,
            'crawler_count': len(self._crawlers),
            'total_requests': sum(m.request_count for m in crawler_metrics),
            'total_success': sum(m.success_count for m in crawler_metrics),
            'total_errors': sum(m.error_count for m in crawler_metrics),
            'average_success_rate': sum(m.get_success_rate() for m in crawler_metrics) / len(crawler_metrics) if crawler_metrics else 0.0
        }
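
For reference, a minimal sketch of how the CrawlerProcess API in the listing above could be driven from a user script. The module path "myproject.spiders" and the spider names are placeholders for a user project, not names shipped with the package.

import asyncio

from crawlo.crawler import CrawlerProcess


async def main():
    # spider_modules points at the package whose import registers your spiders;
    # both the module path and the spider names below are placeholders.
    process = CrawlerProcess(spider_modules=["myproject.spiders"], max_concurrency=2)
    # Run one or more registered spiders concurrently, bounded by the semaphore.
    await process.crawl_multiple(["example_spider", "another_spider"])
    # Aggregated duration, request, and error counts across all crawlers.
    print(process.get_metrics())


asyncio.run(main())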