crawlo-1.3.5-py3-none-any.whl → crawlo-1.3.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (289)
  1. crawlo/__init__.py +87 -87
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +75 -75
  4. crawlo/commands/__init__.py +14 -14
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/help.py +138 -138
  8. crawlo/commands/list.py +155 -155
  9. crawlo/commands/run.py +341 -341
  10. crawlo/commands/startproject.py +436 -436
  11. crawlo/commands/stats.py +187 -187
  12. crawlo/commands/utils.py +196 -196
  13. crawlo/config.py +312 -312
  14. crawlo/config_validator.py +277 -277
  15. crawlo/core/__init__.py +45 -45
  16. crawlo/core/engine.py +439 -439
  17. crawlo/core/processor.py +40 -40
  18. crawlo/core/scheduler.py +257 -257
  19. crawlo/crawler.py +638 -638
  20. crawlo/data/__init__.py +5 -5
  21. crawlo/data/user_agents.py +194 -194
  22. crawlo/downloader/__init__.py +273 -273
  23. crawlo/downloader/aiohttp_downloader.py +228 -228
  24. crawlo/downloader/cffi_downloader.py +245 -245
  25. crawlo/downloader/httpx_downloader.py +259 -259
  26. crawlo/downloader/hybrid_downloader.py +212 -212
  27. crawlo/downloader/playwright_downloader.py +402 -402
  28. crawlo/downloader/selenium_downloader.py +472 -472
  29. crawlo/event.py +11 -11
  30. crawlo/exceptions.py +81 -81
  31. crawlo/extension/__init__.py +39 -39
  32. crawlo/extension/health_check.py +141 -141
  33. crawlo/extension/log_interval.py +57 -57
  34. crawlo/extension/log_stats.py +81 -81
  35. crawlo/extension/logging_extension.py +61 -61
  36. crawlo/extension/memory_monitor.py +104 -104
  37. crawlo/extension/performance_profiler.py +133 -133
  38. crawlo/extension/request_recorder.py +107 -107
  39. crawlo/factories/__init__.py +27 -27
  40. crawlo/factories/base.py +68 -68
  41. crawlo/factories/crawler.py +103 -103
  42. crawlo/factories/registry.py +84 -84
  43. crawlo/filters/__init__.py +154 -154
  44. crawlo/filters/aioredis_filter.py +257 -257
  45. crawlo/filters/memory_filter.py +269 -269
  46. crawlo/framework.py +292 -291
  47. crawlo/initialization/__init__.py +39 -39
  48. crawlo/initialization/built_in.py +425 -425
  49. crawlo/initialization/context.py +141 -141
  50. crawlo/initialization/core.py +193 -193
  51. crawlo/initialization/phases.py +148 -148
  52. crawlo/initialization/registry.py +145 -145
  53. crawlo/items/__init__.py +23 -23
  54. crawlo/items/base.py +23 -23
  55. crawlo/items/fields.py +52 -52
  56. crawlo/items/items.py +104 -104
  57. crawlo/logging/__init__.py +37 -37
  58. crawlo/logging/config.py +96 -96
  59. crawlo/logging/factory.py +128 -128
  60. crawlo/logging/manager.py +111 -111
  61. crawlo/middleware/__init__.py +21 -21
  62. crawlo/middleware/default_header.py +132 -132
  63. crawlo/middleware/download_delay.py +104 -104
  64. crawlo/middleware/middleware_manager.py +135 -135
  65. crawlo/middleware/offsite.py +123 -123
  66. crawlo/middleware/proxy.py +386 -386
  67. crawlo/middleware/request_ignore.py +86 -86
  68. crawlo/middleware/response_code.py +163 -163
  69. crawlo/middleware/response_filter.py +136 -136
  70. crawlo/middleware/retry.py +124 -124
  71. crawlo/middleware/simple_proxy.py +65 -65
  72. crawlo/mode_manager.py +212 -212
  73. crawlo/network/__init__.py +21 -21
  74. crawlo/network/request.py +379 -379
  75. crawlo/network/response.py +359 -359
  76. crawlo/pipelines/__init__.py +21 -21
  77. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  78. crawlo/pipelines/console_pipeline.py +39 -39
  79. crawlo/pipelines/csv_pipeline.py +316 -316
  80. crawlo/pipelines/database_dedup_pipeline.py +222 -222
  81. crawlo/pipelines/json_pipeline.py +218 -218
  82. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  83. crawlo/pipelines/mongo_pipeline.py +131 -131
  84. crawlo/pipelines/mysql_pipeline.py +325 -325
  85. crawlo/pipelines/pipeline_manager.py +76 -76
  86. crawlo/pipelines/redis_dedup_pipeline.py +166 -166
  87. crawlo/project.py +327 -327
  88. crawlo/queue/pqueue.py +42 -42
  89. crawlo/queue/queue_manager.py +503 -503
  90. crawlo/queue/redis_priority_queue.py +326 -326
  91. crawlo/settings/__init__.py +7 -7
  92. crawlo/settings/default_settings.py +321 -321
  93. crawlo/settings/setting_manager.py +214 -214
  94. crawlo/spider/__init__.py +657 -657
  95. crawlo/stats_collector.py +73 -73
  96. crawlo/subscriber.py +129 -129
  97. crawlo/task_manager.py +138 -138
  98. crawlo/templates/crawlo.cfg.tmpl +10 -10
  99. crawlo/templates/project/__init__.py.tmpl +3 -3
  100. crawlo/templates/project/items.py.tmpl +17 -17
  101. crawlo/templates/project/middlewares.py.tmpl +118 -118
  102. crawlo/templates/project/pipelines.py.tmpl +96 -96
  103. crawlo/templates/project/settings.py.tmpl +167 -167
  104. crawlo/templates/project/settings_distributed.py.tmpl +166 -166
  105. crawlo/templates/project/settings_gentle.py.tmpl +166 -166
  106. crawlo/templates/project/settings_high_performance.py.tmpl +167 -167
  107. crawlo/templates/project/settings_minimal.py.tmpl +65 -65
  108. crawlo/templates/project/settings_simple.py.tmpl +164 -164
  109. crawlo/templates/project/spiders/__init__.py.tmpl +9 -9
  110. crawlo/templates/run.py.tmpl +34 -34
  111. crawlo/templates/spider/spider.py.tmpl +143 -143
  112. crawlo/templates/spiders_init.py.tmpl +9 -9
  113. crawlo/tools/__init__.py +200 -200
  114. crawlo/tools/anti_crawler.py +268 -268
  115. crawlo/tools/authenticated_proxy.py +240 -240
  116. crawlo/tools/data_formatter.py +225 -225
  117. crawlo/tools/data_validator.py +180 -180
  118. crawlo/tools/date_tools.py +289 -289
  119. crawlo/tools/distributed_coordinator.py +388 -388
  120. crawlo/tools/encoding_converter.py +127 -127
  121. crawlo/tools/network_diagnostic.py +364 -364
  122. crawlo/tools/request_tools.py +82 -82
  123. crawlo/tools/retry_mechanism.py +224 -224
  124. crawlo/tools/scenario_adapter.py +262 -262
  125. crawlo/tools/text_cleaner.py +232 -232
  126. crawlo/utils/__init__.py +34 -34
  127. crawlo/utils/batch_processor.py +259 -259
  128. crawlo/utils/class_loader.py +25 -25
  129. crawlo/utils/controlled_spider_mixin.py +439 -439
  130. crawlo/utils/db_helper.py +343 -343
  131. crawlo/utils/enhanced_error_handler.py +356 -356
  132. crawlo/utils/env_config.py +142 -142
  133. crawlo/utils/error_handler.py +165 -165
  134. crawlo/utils/func_tools.py +82 -82
  135. crawlo/utils/large_scale_config.py +286 -286
  136. crawlo/utils/large_scale_helper.py +344 -344
  137. crawlo/utils/log.py +79 -79
  138. crawlo/utils/performance_monitor.py +285 -285
  139. crawlo/utils/queue_helper.py +175 -175
  140. crawlo/utils/redis_connection_pool.py +388 -388
  141. crawlo/utils/redis_key_validator.py +198 -198
  142. crawlo/utils/request.py +267 -267
  143. crawlo/utils/request_serializer.py +225 -225
  144. crawlo/utils/spider_loader.py +61 -61
  145. crawlo/utils/system.py +11 -11
  146. crawlo/utils/tools.py +4 -4
  147. crawlo/utils/url.py +39 -39
  148. {crawlo-1.3.5.dist-info → crawlo-1.3.6.dist-info}/METADATA +1126 -1126
  149. crawlo-1.3.6.dist-info/RECORD +290 -0
  150. examples/__init__.py +7 -7
  151. tests/__init__.py +7 -7
  152. tests/advanced_tools_example.py +275 -275
  153. tests/authenticated_proxy_example.py +106 -106
  154. tests/baidu_performance_test.py +108 -108
  155. tests/baidu_test.py +59 -59
  156. tests/cleaners_example.py +160 -160
  157. tests/comprehensive_framework_test.py +212 -212
  158. tests/comprehensive_test.py +81 -81
  159. tests/comprehensive_testing_summary.md +186 -186
  160. tests/config_validation_demo.py +142 -142
  161. tests/controlled_spider_example.py +205 -205
  162. tests/date_tools_example.py +180 -180
  163. tests/debug_configure.py +69 -69
  164. tests/debug_framework_logger.py +84 -84
  165. tests/debug_log_config.py +126 -126
  166. tests/debug_log_levels.py +63 -63
  167. tests/debug_pipelines.py +66 -66
  168. tests/detailed_log_test.py +233 -233
  169. tests/distributed_test.py +66 -66
  170. tests/distributed_test_debug.py +76 -76
  171. tests/dynamic_loading_example.py +523 -523
  172. tests/dynamic_loading_test.py +104 -104
  173. tests/env_config_example.py +133 -133
  174. tests/error_handling_example.py +171 -171
  175. tests/final_comprehensive_test.py +151 -151
  176. tests/final_log_test.py +260 -260
  177. tests/final_validation_test.py +182 -182
  178. tests/fix_log_test.py +142 -142
  179. tests/framework_performance_test.py +202 -202
  180. tests/log_buffering_test.py +111 -111
  181. tests/log_generation_timing_test.py +153 -153
  182. tests/optimized_performance_test.py +211 -211
  183. tests/performance_comparison.py +245 -245
  184. tests/queue_blocking_test.py +113 -113
  185. tests/queue_test.py +89 -89
  186. tests/redis_key_validation_demo.py +130 -130
  187. tests/request_params_example.py +150 -150
  188. tests/response_improvements_example.py +144 -144
  189. tests/scrapy_comparison/ofweek_scrapy.py +138 -138
  190. tests/scrapy_comparison/scrapy_test.py +133 -133
  191. tests/simple_command_test.py +119 -119
  192. tests/simple_crawlo_test.py +127 -127
  193. tests/simple_log_test.py +57 -57
  194. tests/simple_log_test2.py +137 -137
  195. tests/simple_optimization_test.py +128 -128
  196. tests/simple_queue_type_test.py +42 -0
  197. tests/simple_spider_test.py +49 -49
  198. tests/simple_test.py +47 -47
  199. tests/spider_log_timing_test.py +177 -177
  200. tests/test_advanced_tools.py +148 -148
  201. tests/test_all_commands.py +230 -230
  202. tests/test_all_redis_key_configs.py +145 -145
  203. tests/test_authenticated_proxy.py +141 -141
  204. tests/test_batch_processor.py +178 -178
  205. tests/test_cleaners.py +54 -54
  206. tests/test_component_factory.py +174 -174
  207. tests/test_comprehensive.py +146 -146
  208. tests/test_config_consistency.py +80 -80
  209. tests/test_config_merge.py +152 -152
  210. tests/test_config_validator.py +182 -182
  211. tests/test_controlled_spider_mixin.py +79 -79
  212. tests/test_crawlo_proxy_integration.py +108 -108
  213. tests/test_date_tools.py +123 -123
  214. tests/test_default_header_middleware.py +158 -158
  215. tests/test_distributed.py +65 -65
  216. tests/test_double_crawlo_fix.py +207 -207
  217. tests/test_double_crawlo_fix_simple.py +124 -124
  218. tests/test_download_delay_middleware.py +221 -221
  219. tests/test_downloader_proxy_compatibility.py +268 -268
  220. tests/test_dynamic_downloaders_proxy.py +124 -124
  221. tests/test_dynamic_proxy.py +92 -92
  222. tests/test_dynamic_proxy_config.py +146 -146
  223. tests/test_dynamic_proxy_real.py +109 -109
  224. tests/test_edge_cases.py +303 -303
  225. tests/test_enhanced_error_handler.py +270 -270
  226. tests/test_enhanced_error_handler_comprehensive.py +245 -245
  227. tests/test_env_config.py +121 -121
  228. tests/test_error_handler_compatibility.py +112 -112
  229. tests/test_factories.py +252 -252
  230. tests/test_final_validation.py +153 -153
  231. tests/test_framework_env_usage.py +103 -103
  232. tests/test_framework_logger.py +66 -66
  233. tests/test_framework_startup.py +64 -64
  234. tests/test_get_component_logger.py +83 -83
  235. tests/test_integration.py +169 -169
  236. tests/test_item_dedup_redis_key.py +122 -122
  237. tests/test_large_scale_config.py +112 -112
  238. tests/test_large_scale_helper.py +235 -235
  239. tests/test_logging_system.py +282 -282
  240. tests/test_mode_change.py +72 -72
  241. tests/test_mode_consistency.py +51 -51
  242. tests/test_offsite_middleware.py +221 -221
  243. tests/test_parsel.py +29 -29
  244. tests/test_performance.py +327 -327
  245. tests/test_performance_monitor.py +115 -115
  246. tests/test_proxy_api.py +264 -264
  247. tests/test_proxy_health_check.py +32 -32
  248. tests/test_proxy_middleware.py +121 -121
  249. tests/test_proxy_middleware_enhanced.py +216 -216
  250. tests/test_proxy_middleware_integration.py +136 -136
  251. tests/test_proxy_middleware_refactored.py +184 -184
  252. tests/test_proxy_providers.py +56 -56
  253. tests/test_proxy_stats.py +19 -19
  254. tests/test_proxy_strategies.py +59 -59
  255. tests/test_queue_empty_check.py +41 -41
  256. tests/test_queue_manager_double_crawlo.py +173 -173
  257. tests/test_queue_manager_redis_key.py +176 -176
  258. tests/test_queue_type.py +107 -0
  259. tests/test_random_user_agent.py +72 -72
  260. tests/test_real_scenario_proxy.py +195 -195
  261. tests/test_redis_config.py +28 -28
  262. tests/test_redis_connection_pool.py +294 -294
  263. tests/test_redis_key_naming.py +181 -181
  264. tests/test_redis_key_validator.py +123 -123
  265. tests/test_redis_queue.py +224 -224
  266. tests/test_request_ignore_middleware.py +182 -182
  267. tests/test_request_params.py +111 -111
  268. tests/test_request_serialization.py +70 -70
  269. tests/test_response_code_middleware.py +349 -349
  270. tests/test_response_filter_middleware.py +427 -427
  271. tests/test_response_improvements.py +152 -152
  272. tests/test_retry_middleware.py +241 -241
  273. tests/test_scheduler.py +252 -252
  274. tests/test_scheduler_config_update.py +133 -133
  275. tests/test_simple_response.py +61 -61
  276. tests/test_telecom_spider_redis_key.py +205 -205
  277. tests/test_template_content.py +87 -87
  278. tests/test_template_redis_key.py +134 -134
  279. tests/test_tools.py +159 -159
  280. tests/test_user_agents.py +96 -96
  281. tests/tools_example.py +260 -260
  282. tests/untested_features_report.md +138 -138
  283. tests/verify_debug.py +51 -51
  284. tests/verify_distributed.py +117 -117
  285. tests/verify_log_fix.py +111 -111
  286. crawlo-1.3.5.dist-info/RECORD +0 -288
  287. {crawlo-1.3.5.dist-info → crawlo-1.3.6.dist-info}/WHEEL +0 -0
  288. {crawlo-1.3.5.dist-info → crawlo-1.3.6.dist-info}/entry_points.txt +0 -0
  289. {crawlo-1.3.5.dist-info → crawlo-1.3.6.dist-info}/top_level.txt +0 -0
crawlo/spider/__init__.py CHANGED
@@ -1,657 +1,657 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ Crawlo Spider Module
+ ==================
+ Provides the spider base class and related functionality.
+
+ Core features:
+ - Spider base class: the foundation class for all spiders
+ - Automatic registration: spiders are registered through a metaclass
+ - Configuration management: custom settings with chained calls
+ - Lifecycle management: open/close hook methods
+ - Distributed support: automatic detection of the run mode
+
+ Usage example:
+     class MySpider(Spider):
+         name = 'my_spider'
+         start_urls = ['http://example.com']
+
+         # Custom settings
+         custom_settings = {
+             'DOWNLOADER_TYPE': 'httpx',
+             'CONCURRENCY': 10
+         }
+
+         def parse(self, response):
+             # Parsing logic
+             yield Item(data=response.json())
+ """
+ from __future__ import annotations
+ from typing import Type, Any, Optional, List, Dict, Union, Iterator, AsyncIterator
+ from ..network.request import Request
+ from ..utils.log import get_logger
+
+
+ # Global spider registry
+ _DEFAULT_SPIDER_REGISTRY: dict[str, Type[Spider]] = {}
+
+
+ class SpiderMeta(type):
+     """
+     Spider metaclass that provides automatic registration.
+
+     Features:
+     - Automatically registers spiders in the global registry
+     - Validates that spider names are unique
+     - Provides complete error messages
+     """
+
+     def __new__(mcs, name: str, bases: tuple[type], namespace: dict[str, Any], **kwargs):
+         cls = super().__new__(mcs, name, bases, namespace)
+
+         # Check whether this is a Spider subclass
+         is_spider_subclass = any(
+             base is Spider or (isinstance(base, type) and issubclass(base, Spider))
+             for base in bases
+         )
+         if not is_spider_subclass:
+             return cls
+
+         # Validate the spider name
+         spider_name = namespace.get('name')
+         if not isinstance(spider_name, str):
+             raise AttributeError(
+                 f"Spider class '{cls.__name__}' must define a string 'name' attribute.\n"
+                 f"Example: name = 'my_spider'"
+             )
+
+         # Check name uniqueness
+         if spider_name in _DEFAULT_SPIDER_REGISTRY:
+             existing_class = _DEFAULT_SPIDER_REGISTRY[spider_name]
+             raise ValueError(
+                 f"Spider name '{spider_name}' is already used by {existing_class.__name__}.\n"
+                 f"Make sure every spider's name attribute is globally unique.\n"
+                 f"Suggested format: 'project_module_function'"
+             )
+
+         # Register the spider
+         _DEFAULT_SPIDER_REGISTRY[spider_name] = cls
+         # Initialise the logger lazily to avoid blocking at module level
+         try:
+             from crawlo.utils.log import get_logger
+             get_logger(__name__).debug(f"Auto-registered spider: {spider_name} -> {cls.__name__}")
+         except:
+             # Fail silently if the logging system is not initialised yet
+             pass
+
+         return cls
+
+
+ class Spider(metaclass=SpiderMeta):
+     """
+     Spider base class - the foundation of every spider implementation.
+
+     Required attributes:
+     - name: spider name, must be globally unique
+
+     Optional configuration:
+     - start_urls: list of start URLs
+     - custom_settings: dict of custom settings
+     - allowed_domains: list of allowed domains
+
+     Required methods:
+     - parse(response): main method for parsing responses
+
+     Optional methods:
+     - spider_opened(): called when the spider opens
+     - spider_closed(): called when the spider closes
+     - start_requests(): generates the initial requests (uses start_urls by default)
+
+     Example:
+         class MySpider(Spider):
+             name = 'example_spider'
+             start_urls = ['https://example.com']
+
+             custom_settings = {
+                 'DOWNLOADER_TYPE': 'httpx',
+                 'CONCURRENCY': 5,
+                 'DOWNLOAD_DELAY': 1.0
+             }
+
+             def parse(self, response):
+                 # Extract data
+                 data = response.css('title::text').get()
+                 yield {'title': data}
+
+                 # Generate new requests
+                 for link in response.css('a::attr(href)').getall():
+                     yield Request(url=link, callback=self.parse_detail)
+     """
+
+     # Required attribute
+     name: str = None
+
+     # Optional attributes
+     start_urls: List[str] = None
+     custom_settings: Dict[str, Any] = None
+     allowed_domains: List[str] = None
+
+     def __init__(self, name: str = None, **kwargs):
+         """
+         Initialise the spider instance.
+
+         :param name: spider name (optional, defaults to the class attribute)
+         :param kwargs: additional initialisation parameters
+         """
+         # Initialise basic attributes
+         if not hasattr(self, 'start_urls') or self.start_urls is None:
+             self.start_urls = []
+         if not hasattr(self, 'custom_settings') or self.custom_settings is None:
+             self.custom_settings = {}
+         if not hasattr(self, 'allowed_domains') or self.allowed_domains is None:
+             self.allowed_domains = []
+
+         # Set the spider name
+         self.name = name or self.name
+         if not self.name:
+             raise ValueError(f"Spider {self.__class__.__name__} must define a name attribute")
+
+         # Initialise other attributes
+         self.crawler = None
+         # Initialise the logger lazily to avoid blocking
+         self._logger = None
+         self.stats = None
+
+         # Apply extra parameters
+         for key, value in kwargs.items():
+             setattr(self, key, value)
+
+     @property
+     def logger(self):
+         """Lazily initialised logger."""
+         if self._logger is None:
+             from crawlo.utils.log import get_logger
+             self._logger = get_logger(self.name)
+         return self._logger
+
+     @classmethod
+     def create_instance(cls, crawler) -> 'Spider':
+         """
+         Create a spider instance and bind the crawler.
+
+         :param crawler: Crawler instance
+         :return: spider instance
+         """
+         spider = cls()
+         spider.crawler = crawler
+         spider.stats = getattr(crawler, 'stats', None)
+
+         # Merge custom settings - applied lazily to avoid circular dependencies during initialisation
+         if hasattr(spider, 'custom_settings') and spider.custom_settings:
+             # Defer applying the settings until they are actually needed
+             spider._pending_settings = spider.custom_settings.copy()
+             spider.logger.debug(f"Queued {len(spider.custom_settings)} custom settings for application")
+
+         return spider
+
+     def apply_pending_settings(self):
+         """Apply pending settings (called after initialisation is complete)."""
+         if hasattr(self, '_pending_settings') and self._pending_settings:
+             for key, value in self._pending_settings.items():
+                 if self.crawler and hasattr(self.crawler, 'settings'):
+                     self.crawler.settings.set(key, value)
+                     self.logger.debug(f"Applied custom setting: {key} = {value}")
+             # Clear the pending settings
+             delattr(self, '_pending_settings')
+
+     def start_requests(self) -> Iterator[Request]:
+         """
+         Generate the initial requests.
+
+         Default behaviour:
+         - Generates requests from start_urls
+         - Detects distributed mode to decide whether to deduplicate
+         - Supports a single start_url attribute (for compatibility)
+         - Supports batched generation (for large-scale URL sets)
+
+         :return: iterator of Request objects
+         """
+         # Detect whether we are running in distributed mode
+         is_distributed = self._is_distributed_mode()
+
+         # Get the batch-processing configuration
+         batch_size = self._get_batch_size()
+
+         # Generate requests from start_urls
+         if self.start_urls:
+             generated_count = 0
+             for url in self.start_urls:
+                 if self._is_allowed_domain(url):
+                     yield Request(
+                         url=url,
+                         callback=self.parse,
+                         dont_filter=not is_distributed,
+                         meta={'spider_name': self.name}
+                     )
+                     generated_count += 1
+
+                     # Batch control for large URL sets
+                     if batch_size > 0 and generated_count % batch_size == 0:
+                         self.logger.debug(f"Generated {generated_count} requests (batch size: {batch_size})")
+                 else:
+                     self.logger.warning(f"Skipping disallowed domain: {url}")
+
+         # Compatibility with a single start_url attribute
+         elif hasattr(self, 'start_url') and isinstance(getattr(self, 'start_url'), str):
+             url = getattr(self, 'start_url')
+             if self._is_allowed_domain(url):
+                 yield Request(
+                     url=url,
+                     callback=self.parse,
+                     dont_filter=not is_distributed,
+                     meta={'spider_name': self.name}
+                 )
+             else:
+                 self.logger.warning(f"Skipping disallowed domain: {url}")
+
+         else:
+             self.logger.warning(
+                 f"Spider {self.name} defines neither start_urls nor start_url.\n"
+                 f"Define one of them on the spider class or override start_requests()."
+             )
+
+     def _get_batch_size(self) -> int:
+         """
+         Get the batch-size configuration.
+
+         Used as a performance optimisation for large-scale URL sets.
+
+         :return: batch size (0 means unlimited)
+         """
+         if not self.crawler:
+             return 0
+
+         # Read the batch size from the settings
+         batch_size = self.crawler.settings.get_int('SPIDER_BATCH_SIZE', 0)
+
+         # Automatically enable batch mode when start_urls exceeds a threshold
+         if batch_size == 0 and self.start_urls and len(self.start_urls) > 1000:
+             batch_size = 500  # Default batch size
+             self.logger.info(f"Large number of start_urls detected ({len(self.start_urls)}), enabling batch mode (batch size: {batch_size})")
+
+         return batch_size
+
+     def _is_distributed_mode(self) -> bool:
+         """
+         Detect whether the spider is running in distributed mode.
+
+         Detection criteria:
+         - QUEUE_TYPE = 'redis'
+         - FILTER_CLASS contains 'aioredis_filter'
+         - RUN_MODE = 'distributed'
+
+         :return: whether distributed mode is active
+         """
+         if not self.crawler:
+             return False
+
+         settings = self.crawler.settings
+
+         # Check several conditions to decide whether distributed mode is active
+         queue_type = settings.get('QUEUE_TYPE', 'memory')
+         filter_class = settings.get('FILTER_CLASS', '')
+         run_mode = settings.get('RUN_MODE', 'standalone')
+
+         # Markers of distributed mode
+         is_redis_queue = queue_type == 'redis'
+         is_redis_filter = 'aioredis_filter' in filter_class.lower()
+         is_distributed_run_mode = run_mode == 'distributed'
+
+         distributed = is_redis_queue or is_redis_filter or is_distributed_run_mode
+
+         if distributed:
+             self.logger.debug("Distributed mode detected, enabling request deduplication")
+         else:
+             self.logger.debug("Standalone mode detected, disabling request deduplication")
+
+         return distributed
+
+     def _is_allowed_domain(self, url: str) -> bool:
+         """
+         Check whether the URL belongs to an allowed domain.
+
+         :param url: URL to check
+         :return: whether the URL is allowed
+         """
+         if not self.allowed_domains:
+             return True
+
+         from urllib.parse import urlparse
+         try:
+             domain = urlparse(url).netloc.lower()
+             return any(
+                 domain == allowed.lower() or domain.endswith('.' + allowed.lower())
+                 for allowed in self.allowed_domains
+             )
+         except Exception as e:
+             self.logger.warning(f"Failed to parse URL: {url} - {e}")
+             return False
+
+     def parse(self, response):
+         """
+         Main method for parsing responses (must be implemented).
+
+         :param response: response object
+         :return: yielded Item or Request objects
+         """
+         raise NotImplementedError(
+             f"Spider {self.__class__.__name__} must implement the parse() method\n"
+             f"Example:\n"
+             f"def parse(self, response):\n"
+             f"    # Extract data\n"
+             f"    yield {{'title': response.css('title::text').get()}}\n"
+             f"    # Generate new requests\n"
+             f"    for link in response.css('a::attr(href)').getall():\n"
+             f"        yield Request(url=link)"
+         )
+
+     async def spider_opened(self):
+         """
+         Hook called when the spider opens.
+
+         Can be used to:
+         - initialise resources
+         - connect to databases
+         - set up initial state
+         """
+         self.logger.info(f"Spider {self.name} opened")
+
+     async def spider_closed(self):
+         """
+         Hook called when the spider closes.
+
+         Can be used to:
+         - clean up resources
+         - close database connections
+         """
+         # Intentionally prints nothing to avoid duplicating the statistics output
+         # Statistics are reported by the StatsCollector
+         pass
+
+     def __str__(self) -> str:
+         return f"{self.__class__.__name__}(name='{self.name}')"
+
+     def __repr__(self) -> str:
+         return self.__str__()
+
+     def set_custom_setting(self, key: str, value: Any) -> 'Spider':
+         """
+         Set a custom setting (chainable).
+
+         :param key: setting key
+         :param value: setting value
+         :return: self (supports chained calls)
+
+         Example:
+             spider.set_custom_setting('CONCURRENCY', 10)\
+                   .set_custom_setting('DOWNLOAD_DELAY', 1.0)
+         """
+         if not hasattr(self, 'custom_settings') or self.custom_settings is None:
+             self.custom_settings = {}
+
+         self.custom_settings[key] = value
+         self.logger.debug(f"Set custom setting: {key} = {value}")
+
+         # If a crawler is already bound, apply the setting immediately
+         if self.crawler:
+             self.crawler.settings.set(key, value)
+
+         return self
+
+     def get_custom_setting(self, key: str, default: Any = None) -> Any:
+         """
+         Get a custom setting value.
+
+         :param key: setting key
+         :param default: default value
+         :return: setting value
+         """
+         if hasattr(self, 'custom_settings') and self.custom_settings:
+             return self.custom_settings.get(key, default)
+         return default
+
+     def get_spider_info(self) -> Dict[str, Any]:
+         """
+         Get detailed information about the spider.
+
+         :return: dict of spider information
+         """
+         info = {
+             'name': self.name,
+             'class_name': self.__class__.__name__,
+             'module': self.__module__,
+             'start_urls_count': len(self.start_urls) if self.start_urls else 0,
+             'allowed_domains_count': len(self.allowed_domains) if self.allowed_domains else 0,
+             'custom_settings_count': len(self.custom_settings) if self.custom_settings else 0,
+             'is_distributed': self._is_distributed_mode() if self.crawler else None,
+             'has_crawler': self.crawler is not None,
+             'logger_name': self.logger.name if hasattr(self, 'logger') else None
+         }
+
+         # Add method checks
+         info['methods'] = {
+             'has_parse': callable(getattr(self, 'parse', None)),
+             'has_spider_opened': callable(getattr(self, 'spider_opened', None)),
+             'has_spider_closed': callable(getattr(self, 'spider_closed', None)),
+             'has_start_requests': callable(getattr(self, 'start_requests', None))
+         }
+
+         return info
+
+     def make_request(self, url: str, callback=None, **kwargs) -> Request:
+         """
+         Convenience method for creating a Request object.
+
+         :param url: request URL
+         :param callback: callback (defaults to parse)
+         :param kwargs: other Request parameters
+         :return: Request object
+         """
+         return Request(
+             url=url,
+             callback=callback or self.parse,
+             meta={'spider_name': self.name},
+             **kwargs
+         )
+
+
+ # === Advanced spider feature extensions ===
+
+ class SpiderStatsTracker:
+     """
+     Spider statistics tracker.
+     Provides detailed performance monitoring.
+     """
+
+     def __init__(self, spider_name: str):
+         self.spider_name = spider_name
+         self.start_time = None
+         self.end_time = None
+         self.request_count = 0
+         self.response_count = 0
+         self.item_count = 0
+         self.error_count = 0
+         self.domain_stats = {}
+
+     def start_tracking(self):
+         """Start tracking."""
+         import time
+         self.start_time = time.time()
+
+     def stop_tracking(self):
+         """Stop tracking."""
+         import time
+         self.end_time = time.time()
+
+     def record_request(self, url: str):
+         """Record a request."""
+         self.request_count += 1
+         from urllib.parse import urlparse
+         domain = urlparse(url).netloc
+         self.domain_stats[domain] = self.domain_stats.get(domain, 0) + 1
+
+     def record_response(self):
+         """Record a response."""
+         self.response_count += 1
+
+     def record_item(self):
+         """Record an item."""
+         self.item_count += 1
+
+     def record_error(self):
+         """Record an error."""
+         self.error_count += 1
+
+     def get_summary(self) -> Dict[str, Any]:
+         """Get a statistics summary."""
+         duration = (self.end_time - self.start_time) if (self.start_time and self.end_time) else 0
+
+         return {
+             'spider_name': self.spider_name,
+             'duration_seconds': round(duration, 2),
+             'requests': self.request_count,
+             'responses': self.response_count,
+             'items': self.item_count,
+             'errors': self.error_count,
+             'success_rate': round((self.response_count / max(1, self.request_count)) * 100, 2),
+             'requests_per_second': round(self.request_count / max(1, duration), 2),
+             'top_domains': sorted(
+                 self.domain_stats.items(),
+                 key=lambda x: x[1],
+                 reverse=True
+             )[:5]
+         }
+
+
+ def create_spider_from_template(name: str, start_urls: List[str], **options) -> Type[Spider]:
+     """
+     Quickly create a spider class from a template.
+
+     :param name: spider name
+     :param start_urls: list of start URLs
+     :param options: other options
+     :return: the newly created spider class
+
+     Example:
+         MySpider = create_spider_from_template(
+             name='quick_spider',
+             start_urls=['http://example.com'],
+             allowed_domains=['example.com'],
+             custom_settings={'CONCURRENCY': 5}
+         )
+     """
+
+     # Build the spider class dynamically
+     class_attrs = {
+         'name': name,
+         'start_urls': start_urls,
+         'allowed_domains': options.get('allowed_domains', []),
+         'custom_settings': options.get('custom_settings', {})
+     }
+
+     # Attach a custom parse method
+     if 'parse_function' in options:
+         class_attrs['parse'] = options['parse_function']
+     else:
+         def default_parse(self, response):
+             """Default parse method."""
+             yield {'url': response.url, 'title': getattr(response, 'title', 'N/A')}
+         class_attrs['parse'] = default_parse
+
+     # Build the class name
+     class_name = options.get('class_name', f"Generated{name.replace('_', '').title()}Spider")
+
+     # Create the class dynamically
+     spider_class = type(class_name, (Spider,), class_attrs)
+
+     get_logger(__name__).info(f"Dynamically created spider class: {class_name} (name='{name}')")
+
+     return spider_class
+
+
+ # === Public read-only interface ===
+ def get_global_spider_registry() -> dict[str, Type[Spider]]:
+     """
+     Get a copy of the global spider registry.
+
+     :return: a copy of the spider registry
+     """
+     return _DEFAULT_SPIDER_REGISTRY.copy()
+
+
+ def get_spider_by_name(name: str) -> Optional[Type[Spider]]:
+     """
+     Get a spider class by name.
+
+     :param name: spider name
+     :return: the spider class, or None
+     """
+     return _DEFAULT_SPIDER_REGISTRY.get(name)
+
+
+ def get_all_spider_classes() -> List[Type[Spider]]:
+     """
+     Get all registered spider classes.
+
+     :return: list of spider classes
+     """
+     return list(set(_DEFAULT_SPIDER_REGISTRY.values()))
+
+
+ def get_spider_names() -> List[str]:
+     """
+     Get all spider names.
+
+     :return: list of spider names
+     """
+     return list(_DEFAULT_SPIDER_REGISTRY.keys())
+
+
+ def is_spider_registered(name: str) -> bool:
+     """
+     Check whether a spider is registered.
+
+     :param name: spider name
+     :return: whether it is registered
+     """
+     return name in _DEFAULT_SPIDER_REGISTRY
+
+
+ def unregister_spider(name: str) -> bool:
+     """
+     Unregister a spider (intended for tests only).
+
+     :param name: spider name
+     :return: whether unregistration succeeded
+     """
+     if name in _DEFAULT_SPIDER_REGISTRY:
+         del _DEFAULT_SPIDER_REGISTRY[name]
+         return True
+     return False
+
+
+ # Exported public interface
+ __all__ = [
+     'Spider',
+     'SpiderMeta',
+     'SpiderStatsTracker',
+     'create_spider_from_template',
+     'get_global_spider_registry',
+     'get_spider_by_name',
+     'get_all_spider_classes',
+     'get_spider_names',
+     'is_spider_registered',
+     'unregister_spider'
+ ]
+
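
Based on the module shown above, a minimal usage sketch of the Spider base class and the registry helpers; the spider name, URL, and settings below are illustrative only and are not taken from the release:

    from crawlo.spider import Spider, get_spider_by_name, is_spider_registered
    from crawlo.network.request import Request

    class ExampleSpider(Spider):
        # SpiderMeta registers the class automatically; 'name' must be globally unique
        name = 'example_spider'
        start_urls = ['https://example.com']
        custom_settings = {'DOWNLOADER_TYPE': 'httpx', 'CONCURRENCY': 5}

        def parse(self, response):
            # Extract data and follow links, as in the module docstring example
            yield {'title': response.css('title::text').get()}
            for link in response.css('a::attr(href)').getall():
                yield Request(url=link, callback=self.parse)

    # Defining the class is enough: the metaclass has added it to the global registry
    assert is_spider_registered('example_spider')
    assert get_spider_by_name('example_spider') is ExampleSpider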