crawlo 1.2.2-py3-none-any.whl → 1.2.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (222)
  1. crawlo/__init__.py +61 -61
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +60 -60
  4. crawlo/cleaners/data_formatter.py +225 -225
  5. crawlo/cleaners/encoding_converter.py +125 -125
  6. crawlo/cleaners/text_cleaner.py +232 -232
  7. crawlo/cli.py +81 -81
  8. crawlo/commands/__init__.py +14 -14
  9. crawlo/commands/check.py +594 -594
  10. crawlo/commands/genspider.py +151 -151
  11. crawlo/commands/help.py +144 -142
  12. crawlo/commands/list.py +155 -155
  13. crawlo/commands/run.py +323 -292
  14. crawlo/commands/startproject.py +420 -418
  15. crawlo/commands/stats.py +188 -188
  16. crawlo/commands/utils.py +186 -186
  17. crawlo/config.py +312 -312
  18. crawlo/config_validator.py +251 -252
  19. crawlo/core/__init__.py +2 -2
  20. crawlo/core/engine.py +354 -354
  21. crawlo/core/processor.py +40 -40
  22. crawlo/core/scheduler.py +143 -143
  23. crawlo/crawler.py +1110 -1027
  24. crawlo/data/__init__.py +6 -0
  25. crawlo/data/user_agents.py +108 -0
  26. crawlo/downloader/__init__.py +266 -266
  27. crawlo/downloader/aiohttp_downloader.py +220 -220
  28. crawlo/downloader/cffi_downloader.py +256 -256
  29. crawlo/downloader/httpx_downloader.py +259 -259
  30. crawlo/downloader/hybrid_downloader.py +212 -213
  31. crawlo/downloader/playwright_downloader.py +402 -402
  32. crawlo/downloader/selenium_downloader.py +472 -472
  33. crawlo/event.py +11 -11
  34. crawlo/exceptions.py +81 -81
  35. crawlo/extension/__init__.py +37 -37
  36. crawlo/extension/health_check.py +141 -141
  37. crawlo/extension/log_interval.py +57 -57
  38. crawlo/extension/log_stats.py +81 -81
  39. crawlo/extension/logging_extension.py +43 -43
  40. crawlo/extension/memory_monitor.py +104 -104
  41. crawlo/extension/performance_profiler.py +133 -133
  42. crawlo/extension/request_recorder.py +107 -107
  43. crawlo/filters/__init__.py +154 -154
  44. crawlo/filters/aioredis_filter.py +280 -280
  45. crawlo/filters/memory_filter.py +269 -269
  46. crawlo/items/__init__.py +23 -23
  47. crawlo/items/base.py +21 -21
  48. crawlo/items/fields.py +52 -53
  49. crawlo/items/items.py +104 -104
  50. crawlo/middleware/__init__.py +21 -21
  51. crawlo/middleware/default_header.py +131 -131
  52. crawlo/middleware/download_delay.py +104 -104
  53. crawlo/middleware/middleware_manager.py +135 -135
  54. crawlo/middleware/offsite.py +114 -115
  55. crawlo/middleware/proxy.py +367 -366
  56. crawlo/middleware/request_ignore.py +86 -87
  57. crawlo/middleware/response_code.py +163 -164
  58. crawlo/middleware/response_filter.py +136 -137
  59. crawlo/middleware/retry.py +124 -124
  60. crawlo/mode_manager.py +211 -211
  61. crawlo/network/__init__.py +21 -21
  62. crawlo/network/request.py +338 -338
  63. crawlo/network/response.py +359 -359
  64. crawlo/pipelines/__init__.py +21 -21
  65. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  66. crawlo/pipelines/console_pipeline.py +39 -39
  67. crawlo/pipelines/csv_pipeline.py +316 -316
  68. crawlo/pipelines/database_dedup_pipeline.py +222 -224
  69. crawlo/pipelines/json_pipeline.py +218 -218
  70. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  71. crawlo/pipelines/mongo_pipeline.py +131 -131
  72. crawlo/pipelines/mysql_pipeline.py +317 -316
  73. crawlo/pipelines/pipeline_manager.py +61 -61
  74. crawlo/pipelines/redis_dedup_pipeline.py +165 -167
  75. crawlo/project.py +279 -187
  76. crawlo/queue/pqueue.py +37 -37
  77. crawlo/queue/queue_manager.py +337 -337
  78. crawlo/queue/redis_priority_queue.py +298 -298
  79. crawlo/settings/__init__.py +7 -7
  80. crawlo/settings/default_settings.py +217 -226
  81. crawlo/settings/setting_manager.py +122 -122
  82. crawlo/spider/__init__.py +639 -639
  83. crawlo/stats_collector.py +59 -59
  84. crawlo/subscriber.py +129 -130
  85. crawlo/task_manager.py +30 -30
  86. crawlo/templates/crawlo.cfg.tmpl +10 -10
  87. crawlo/templates/project/__init__.py.tmpl +3 -3
  88. crawlo/templates/project/items.py.tmpl +17 -17
  89. crawlo/templates/project/middlewares.py.tmpl +118 -118
  90. crawlo/templates/project/pipelines.py.tmpl +96 -96
  91. crawlo/templates/project/run.py.tmpl +47 -45
  92. crawlo/templates/project/settings.py.tmpl +350 -327
  93. crawlo/templates/project/settings_distributed.py.tmpl +160 -119
  94. crawlo/templates/project/settings_gentle.py.tmpl +133 -94
  95. crawlo/templates/project/settings_high_performance.py.tmpl +155 -151
  96. crawlo/templates/project/settings_simple.py.tmpl +108 -68
  97. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  98. crawlo/templates/spider/spider.py.tmpl +143 -143
  99. crawlo/tools/__init__.py +182 -182
  100. crawlo/tools/anti_crawler.py +268 -268
  101. crawlo/tools/authenticated_proxy.py +240 -240
  102. crawlo/tools/data_validator.py +180 -180
  103. crawlo/tools/date_tools.py +35 -35
  104. crawlo/tools/distributed_coordinator.py +386 -386
  105. crawlo/tools/retry_mechanism.py +220 -220
  106. crawlo/tools/scenario_adapter.py +262 -262
  107. crawlo/utils/__init__.py +35 -35
  108. crawlo/utils/batch_processor.py +259 -260
  109. crawlo/utils/controlled_spider_mixin.py +439 -439
  110. crawlo/utils/date_tools.py +290 -290
  111. crawlo/utils/db_helper.py +343 -343
  112. crawlo/utils/enhanced_error_handler.py +356 -359
  113. crawlo/utils/env_config.py +105 -105
  114. crawlo/utils/error_handler.py +123 -125
  115. crawlo/utils/func_tools.py +82 -82
  116. crawlo/utils/large_scale_config.py +286 -286
  117. crawlo/utils/large_scale_helper.py +344 -343
  118. crawlo/utils/log.py +128 -128
  119. crawlo/utils/performance_monitor.py +285 -284
  120. crawlo/utils/queue_helper.py +175 -175
  121. crawlo/utils/redis_connection_pool.py +334 -334
  122. crawlo/utils/redis_key_validator.py +198 -199
  123. crawlo/utils/request.py +267 -267
  124. crawlo/utils/request_serializer.py +218 -219
  125. crawlo/utils/spider_loader.py +61 -62
  126. crawlo/utils/system.py +11 -11
  127. crawlo/utils/tools.py +4 -4
  128. crawlo/utils/url.py +39 -39
  129. {crawlo-1.2.2.dist-info → crawlo-1.2.4.dist-info}/METADATA +764 -692
  130. crawlo-1.2.4.dist-info/RECORD +206 -0
  131. examples/__init__.py +7 -7
  132. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +81 -81
  133. tests/__init__.py +7 -7
  134. tests/advanced_tools_example.py +275 -275
  135. tests/authenticated_proxy_example.py +236 -236
  136. tests/cleaners_example.py +160 -160
  137. tests/config_validation_demo.py +102 -102
  138. tests/controlled_spider_example.py +205 -205
  139. tests/date_tools_example.py +180 -180
  140. tests/dynamic_loading_example.py +523 -523
  141. tests/dynamic_loading_test.py +104 -104
  142. tests/env_config_example.py +133 -133
  143. tests/error_handling_example.py +171 -171
  144. tests/redis_key_validation_demo.py +130 -130
  145. tests/response_improvements_example.py +144 -144
  146. tests/test_advanced_tools.py +148 -148
  147. tests/test_all_redis_key_configs.py +145 -145
  148. tests/test_authenticated_proxy.py +141 -141
  149. tests/test_cleaners.py +54 -54
  150. tests/test_comprehensive.py +146 -146
  151. tests/test_config_validator.py +193 -193
  152. tests/test_crawlo_proxy_integration.py +172 -172
  153. tests/test_date_tools.py +123 -123
  154. tests/test_default_header_middleware.py +158 -158
  155. tests/test_double_crawlo_fix.py +207 -207
  156. tests/test_double_crawlo_fix_simple.py +124 -124
  157. tests/test_download_delay_middleware.py +221 -221
  158. tests/test_downloader_proxy_compatibility.py +268 -268
  159. tests/test_dynamic_downloaders_proxy.py +124 -124
  160. tests/test_dynamic_proxy.py +92 -92
  161. tests/test_dynamic_proxy_config.py +146 -146
  162. tests/test_dynamic_proxy_real.py +109 -109
  163. tests/test_edge_cases.py +303 -303
  164. tests/test_enhanced_error_handler.py +270 -270
  165. tests/test_env_config.py +121 -121
  166. tests/test_error_handler_compatibility.py +112 -112
  167. tests/test_final_validation.py +153 -153
  168. tests/test_framework_env_usage.py +103 -103
  169. tests/test_integration.py +356 -356
  170. tests/test_item_dedup_redis_key.py +122 -122
  171. tests/test_offsite_middleware.py +221 -221
  172. tests/test_parsel.py +29 -29
  173. tests/test_performance.py +327 -327
  174. tests/test_proxy_api.py +264 -264
  175. tests/test_proxy_health_check.py +32 -32
  176. tests/test_proxy_middleware.py +121 -121
  177. tests/test_proxy_middleware_enhanced.py +216 -216
  178. tests/test_proxy_middleware_integration.py +136 -136
  179. tests/test_proxy_providers.py +56 -56
  180. tests/test_proxy_stats.py +19 -19
  181. tests/test_proxy_strategies.py +59 -59
  182. tests/test_queue_manager_double_crawlo.py +173 -173
  183. tests/test_queue_manager_redis_key.py +176 -176
  184. tests/test_real_scenario_proxy.py +195 -195
  185. tests/test_redis_config.py +28 -28
  186. tests/test_redis_connection_pool.py +294 -294
  187. tests/test_redis_key_naming.py +181 -181
  188. tests/test_redis_key_validator.py +123 -123
  189. tests/test_redis_queue.py +224 -224
  190. tests/test_request_ignore_middleware.py +182 -182
  191. tests/test_request_serialization.py +70 -70
  192. tests/test_response_code_middleware.py +349 -349
  193. tests/test_response_filter_middleware.py +427 -427
  194. tests/test_response_improvements.py +152 -152
  195. tests/test_retry_middleware.py +241 -241
  196. tests/test_scheduler.py +241 -241
  197. tests/test_simple_response.py +61 -61
  198. tests/test_telecom_spider_redis_key.py +205 -205
  199. tests/test_template_content.py +87 -87
  200. tests/test_template_redis_key.py +134 -134
  201. tests/test_tools.py +153 -153
  202. tests/tools_example.py +257 -257
  203. crawlo-1.2.2.dist-info/RECORD +0 -220
  204. examples/aiohttp_settings.py +0 -42
  205. examples/curl_cffi_settings.py +0 -41
  206. examples/default_header_middleware_example.py +0 -107
  207. examples/default_header_spider_example.py +0 -129
  208. examples/download_delay_middleware_example.py +0 -160
  209. examples/httpx_settings.py +0 -42
  210. examples/multi_downloader_proxy_example.py +0 -81
  211. examples/offsite_middleware_example.py +0 -55
  212. examples/offsite_spider_example.py +0 -107
  213. examples/proxy_spider_example.py +0 -166
  214. examples/request_ignore_middleware_example.py +0 -51
  215. examples/request_ignore_spider_example.py +0 -99
  216. examples/response_code_middleware_example.py +0 -52
  217. examples/response_filter_middleware_example.py +0 -67
  218. examples/tong_hua_shun_settings.py +0 -62
  219. examples/tong_hua_shun_spider.py +0 -170
  220. {crawlo-1.2.2.dist-info → crawlo-1.2.4.dist-info}/WHEEL +0 -0
  221. {crawlo-1.2.2.dist-info → crawlo-1.2.4.dist-info}/entry_points.txt +0 -0
  222. {crawlo-1.2.2.dist-info → crawlo-1.2.4.dist-info}/top_level.txt +0 -0
@@ -1,259 +1,259 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- import httpx
- from typing import Optional
- from httpx import AsyncClient, Timeout, Limits
-
- from crawlo.network.response import Response
- from crawlo.downloader import DownloaderBase
- from crawlo.utils.log import get_logger
-
- # Try to import the httpx exceptions so they can be caught more precisely
- try:
-     # httpx 0.23.0+ moved the exceptions into _exceptions
-     from httpx import ConnectError, TimeoutException, NetworkError, HTTPStatusError
- except ImportError:
-     try:
-         # Older versions may expose them in httpcore or at the top level
-         from httpcore import ConnectError
-         from httpx import TimeoutException, NetworkError, HTTPStatusError
-     except ImportError:
-         ConnectError = httpx.ConnectError
-         TimeoutException = httpx.TimeoutException
-         NetworkError = httpx.NetworkError
-         HTTPStatusError = httpx.HTTPStatusError
-
- # Exceptions we treat as network problems that should trigger the fallback
- NETWORK_EXCEPTIONS = (ConnectError, TimeoutException, NetworkError)
-
-
- class HttpXDownloader(DownloaderBase):
-     """
-     High-performance asynchronous downloader based on httpx
-     - Uses a persistent AsyncClient (recommended practice)
-     - Supports connection pooling, HTTP/2 and transparent proxies
-     - Handles the Request's json_body and form_data intelligently
-     - Falls back to a direct connection automatically when the proxy fails
-     """
-
-     def __init__(self, crawler):
-         super().__init__(crawler)
-         self._client: Optional[AsyncClient] = None
-         self._client_timeout: Optional[Timeout] = None
-         self._client_limits: Optional[Limits] = None
-         self._client_verify: bool = True
-         self._client_http2: bool = False
-         self.max_download_size: Optional[int] = None
-         # ------------------------
-         self._timeout: Optional[Timeout] = None
-         self._limits: Optional[Limits] = None
-         # --- Get a logger instance ---
-         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
-
-     def open(self):
-         super().open()
-         self.logger.info("Opening HttpXDownloader")
-
-         # Read configuration
-         timeout_total = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
-         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
-         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
-         max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
-
-         # Store configuration
-         self.max_download_size = max_download_size
-
-         # --- Save the client configuration for reuse ---
-         self._client_timeout = Timeout(
-             connect=10.0,  # connection establishment timeout
-             read=timeout_total - 10.0 if timeout_total > 10 else timeout_total / 2,  # read timeout
-             write=10.0,  # write timeout
-             pool=1.0  # timeout for acquiring a connection from the pool
-         )
-         self._client_limits = Limits(
-             max_connections=pool_limit,
-             max_keepalive_connections=pool_per_host
-         )
-         self._client_verify = self.crawler.settings.get_bool("VERIFY_SSL", True)
-         self._client_http2 = True  # enable HTTP/2 support
-         # ----------------------------
-
-         # Create the persistent client (no global proxy is set here)
-         self._client = AsyncClient(
-             timeout=self._client_timeout,
-             limits=self._client_limits,
-             verify=self._client_verify,
-             http2=self._client_http2,
-             follow_redirects=True,  # follow redirects automatically
-             # Note: proxy/proxies is deliberately not set here
-         )
-
-         self.logger.debug("HttpXDownloader initialized.")
-
-     async def download(self, request) -> Optional[Response]:
-         """Download the request and return a Response, with graceful fallback when the proxy fails"""
-         if not self._client:
-             raise RuntimeError("HttpXDownloader client is not available.")
-
-         start_time = None
-         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
-             import time
-             start_time = time.time()
-
-         # --- 1. Decide which client instance to use ---
-         effective_client = self._client  # default to the shared main client
-         temp_client = None  # temporary client that may be created
-         used_proxy = None  # record the proxy currently being tried
-
-         try:
-             # --- 2. Build the request arguments (without proxy/proxies) ---
-             kwargs = {
-                 "method": request.method,
-                 "url": request.url,
-                 "headers": request.headers,
-                 "cookies": request.cookies,
-                 "follow_redirects": request.allow_redirects,
-             }
-
-             # Handle the body intelligently (key optimization)
-             if hasattr(request, "_json_body") and request._json_body is not None:
-                 kwargs["json"] = request._json_body  # let httpx handle the serialization
-             elif isinstance(request.body, (dict, list)):
-                 kwargs["json"] = request.body
-             else:
-                 kwargs["content"] = request.body  # use content instead of data
-
-             # --- 3. Handle the proxy ---
-             httpx_proxy_config = None  # proxy configuration for initializing the temporary client
-             if request.proxy:
-                 # Prepare httpx's proxy argument according to the type of request.proxy
-                 if isinstance(request.proxy, str):
-                     # A plain proxy URL string
-                     httpx_proxy_config = request.proxy
-                 elif isinstance(request.proxy, dict):
-                     # Pick a suitable proxy URL from the dict
-                     # Prefer the one that matches the request scheme, otherwise fall back to http
-                     from urllib.parse import urlparse
-                     request_scheme = urlparse(request.url).scheme
-                     if request_scheme == "https" and request.proxy.get("https"):
-                         httpx_proxy_config = request.proxy["https"]
-                     elif request.proxy.get("http"):
-                         httpx_proxy_config = request.proxy["http"]
-                     else:
-                         # If nothing matches, try any available entry
-                         httpx_proxy_config = next(iter(request.proxy.values()), None)
-                         if httpx_proxy_config:
-                             self.logger.warning(
-                                 f"No specific proxy for scheme '{request_scheme}', using '{httpx_proxy_config}'"
-                             )
-
-                 # If a proxy configuration was resolved, create a temporary client
-                 if httpx_proxy_config:
-                     try:
-                         # --- 4. Create a temporary client configured with the proxy ---
-                         # Reuse the configuration saved in open()
-                         temp_client = AsyncClient(
-                             timeout=self._client_timeout,
-                             limits=self._client_limits,
-                             verify=self._client_verify,
-                             http2=self._client_http2,
-                             follow_redirects=True,  # make sure this is inherited
-                             proxy=httpx_proxy_config,  # set the proxy
-                         )
-                         effective_client = temp_client
-                         used_proxy = httpx_proxy_config  # record the proxy in use
-                         self.logger.debug(f"Using temporary client with proxy: {httpx_proxy_config} for {request.url}")
-                     except Exception as e:
-                         self.logger.error(
-                             f"Failed to create temporary client with proxy {httpx_proxy_config} for {request.url}: {e}")
-                         # On failure, fall back to the main client (no proxy)
-                         # Either raise or continue here
-                         # raise  # uncomment if a proxy failure should fail the request
-
-             # --- 5. Send the request (with fallback logic) ---
-             try:
-                 httpx_response = await effective_client.request(**kwargs)
-             except NETWORK_EXCEPTIONS as proxy_error:
-                 # --- Graceful fallback logic ---
-                 # If we just tried the proxy (temp_client) and it failed
-                 if temp_client is not None and effective_client is temp_client:
-                     # Log a warning
-                     self.logger.warning(
-                         f"Proxy request failed ({used_proxy}), retrying with a direct connection: {request.url} | error: {repr(proxy_error)}"
-                     )
-                     # Close the failed temporary client
-                     await temp_client.aclose()
-                     temp_client = None  # prevent finally from closing it again
-
-                     # Switch to the main client (direct connection)
-                     effective_client = self._client
-                     # Retry the request
-                     httpx_response = await effective_client.request(**kwargs)
-                 else:
-                     # If the main (direct) client failed, or it is not a network error, re-raise
-                     raise
-
-             # --- 6. Safety check: guard against oversized response bodies ---
-             content_length = httpx_response.headers.get("Content-Length")
-             if content_length and int(content_length) > self.max_download_size:
-                 await httpx_response.aclose()  # close the connection immediately to release resources
-                 raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
-
-             # --- 7. Read the response body ---
-             body = await httpx_response.aread()
-
-             # --- 8. Record download statistics ---
-             if start_time:
-                 download_time = time.time() - start_time
-                 self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
-
-             # --- 9. Build and return the Response ---
-             return self.structure_response(request=request, response=httpx_response, body=body)
-
-         except httpx.TimeoutException as e:
-             self.logger.error(f"Timeout error for {request.url}: {e}")
-             raise
-         except httpx.NetworkError as e:
-             self.logger.error(f"Network error for {request.url}: {e}")
-             raise
-         except httpx.HTTPStatusError as e:
-             self.logger.warning(f"HTTP {e.response.status_code} for {request.url}: {e}")
-             # Even for 4xx/5xx, return a Response and let the upper layers (e.g. the spider) handle it
-             # Uncomment the next line if raising here is needed
-             # raise
-             # Read the response body so structure_response can process it
-             try:
-                 error_body = await e.response.aread()
-             except Exception:
-                 error_body = b""  # empty if reading the error response body fails
-             return self.structure_response(request=request, response=e.response, body=error_body)
-         except Exception as e:
-             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
-             raise
-
-         finally:
-             # --- 10. Cleanup: close the temporary client ---
-             # If a temporary client was created, close it
-             if temp_client:
-                 try:
-                     await temp_client.aclose()
-                     # self.logger.debug("Closed temporary client.")
-                 except Exception as e:
-                     self.logger.warning(f"Error closing temporary client: {e}")
-
-     @staticmethod
-     def structure_response(request, response: httpx.Response, body: bytes) -> Response:
-         return Response(
-             url=str(response.url),  # httpx's URL is an object and must be converted to a string
-             headers=dict(response.headers),
-             status_code=response.status_code,  # note: use status_code
-             body=body,
-             request=request
-         )
-
-     async def close(self) -> None:
-         """Close the main client"""
-         if self._client:
-             self.logger.info("Closing HttpXDownloader client...")
-             await self._client.aclose()
-             self.logger.debug("HttpXDownloader closed.")
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import httpx
+ from typing import Optional
+ from httpx import AsyncClient, Timeout, Limits
+
+ from crawlo.network.response import Response
+ from crawlo.downloader import DownloaderBase
+ from crawlo.utils.log import get_logger
+
+ # Try to import the httpx exceptions so they can be caught more precisely
+ try:
+     # httpx 0.23.0+ moved the exceptions into _exceptions
+     from httpx import ConnectError, TimeoutException, NetworkError, HTTPStatusError
+ except ImportError:
+     try:
+         # Older versions may expose them in httpcore or at the top level
+         from httpcore import ConnectError
+         from httpx import TimeoutException, NetworkError, HTTPStatusError
+     except ImportError:
+         ConnectError = httpx.ConnectError
+         TimeoutException = httpx.TimeoutException
+         NetworkError = httpx.NetworkError
+         HTTPStatusError = httpx.HTTPStatusError
+
+ # Exceptions we treat as network problems that should trigger the fallback
+ NETWORK_EXCEPTIONS = (ConnectError, TimeoutException, NetworkError)
+
+
+ class HttpXDownloader(DownloaderBase):
+     """
+     High-performance asynchronous downloader based on httpx
+     - Uses a persistent AsyncClient (recommended practice)
+     - Supports connection pooling, HTTP/2 and transparent proxies
+     - Handles the Request's json_body and form_data intelligently
+     - Falls back to a direct connection automatically when the proxy fails
+     """
+
+     def __init__(self, crawler):
+         super().__init__(crawler)
+         self._client: Optional[AsyncClient] = None
+         self._client_timeout: Optional[Timeout] = None
+         self._client_limits: Optional[Limits] = None
+         self._client_verify: bool = True
+         self._client_http2: bool = False
+         self.max_download_size: Optional[int] = None
+         # ------------------------
+         self._timeout: Optional[Timeout] = None
+         self._limits: Optional[Limits] = None
+         # --- Get a logger instance ---
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
+
+     def open(self):
+         super().open()
+         self.logger.info("Opening HttpXDownloader")
+
+         # Read configuration
+         timeout_total = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
+         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
+         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
+         max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
+
+         # Store configuration
+         self.max_download_size = max_download_size
+
+         # --- Save the client configuration for reuse ---
+         self._client_timeout = Timeout(
+             connect=10.0,  # connection establishment timeout
+             read=timeout_total - 10.0 if timeout_total > 10 else timeout_total / 2,  # read timeout
+             write=10.0,  # write timeout
+             pool=1.0  # timeout for acquiring a connection from the pool
+         )
+         self._client_limits = Limits(
+             max_connections=pool_limit,
+             max_keepalive_connections=pool_per_host
+         )
+         self._client_verify = self.crawler.settings.get_bool("VERIFY_SSL", True)
+         self._client_http2 = True  # enable HTTP/2 support
+         # ----------------------------
+
+         # Create the persistent client (no global proxy is set here)
+         self._client = AsyncClient(
+             timeout=self._client_timeout,
+             limits=self._client_limits,
+             verify=self._client_verify,
+             http2=self._client_http2,
+             follow_redirects=True,  # follow redirects automatically
+             # Note: proxy/proxies is deliberately not set here
+         )
+
+         self.logger.debug("HttpXDownloader initialized.")
+
+     async def download(self, request) -> Optional[Response]:
+         """Download the request and return a Response, with graceful fallback when the proxy fails"""
+         if not self._client:
+             raise RuntimeError("HttpXDownloader client is not available.")
+
+         start_time = None
+         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
+             import time
+             start_time = time.time()
+
+         # --- 1. Decide which client instance to use ---
+         effective_client = self._client  # default to the shared main client
+         temp_client = None  # temporary client that may be created
+         used_proxy = None  # record the proxy currently being tried
+
+         try:
+             # --- 2. Build the request arguments (without proxy/proxies) ---
+             kwargs = {
+                 "method": request.method,
+                 "url": request.url,
+                 "headers": request.headers,
+                 "cookies": request.cookies,
+                 "follow_redirects": request.allow_redirects,
+             }
+
+             # Handle the body intelligently (key optimization)
+             if hasattr(request, "_json_body") and request._json_body is not None:
+                 kwargs["json"] = request._json_body  # let httpx handle the serialization
+             elif isinstance(request.body, (dict, list)):
+                 kwargs["json"] = request.body
+             else:
+                 kwargs["content"] = request.body  # use content instead of data
+
+             # --- 3. Handle the proxy ---
+             httpx_proxy_config = None  # proxy configuration for initializing the temporary client
+             if request.proxy:
+                 # Prepare httpx's proxy argument according to the type of request.proxy
+                 if isinstance(request.proxy, str):
+                     # A plain proxy URL string
+                     httpx_proxy_config = request.proxy
+                 elif isinstance(request.proxy, dict):
+                     # Pick a suitable proxy URL from the dict
+                     # Prefer the one that matches the request scheme, otherwise fall back to http
+                     from urllib.parse import urlparse
+                     request_scheme = urlparse(request.url).scheme
+                     if request_scheme == "https" and request.proxy.get("https"):
+                         httpx_proxy_config = request.proxy["https"]
+                     elif request.proxy.get("http"):
+                         httpx_proxy_config = request.proxy["http"]
+                     else:
+                         # If nothing matches, try any available entry
+                         httpx_proxy_config = next(iter(request.proxy.values()), None)
+                         if httpx_proxy_config:
+                             self.logger.warning(
+                                 f"No specific proxy for scheme '{request_scheme}', using '{httpx_proxy_config}'"
+                             )
+
+                 # If a proxy configuration was resolved, create a temporary client
+                 if httpx_proxy_config:
+                     try:
+                         # --- 4. Create a temporary client configured with the proxy ---
+                         # Reuse the configuration saved in open()
+                         temp_client = AsyncClient(
+                             timeout=self._client_timeout,
+                             limits=self._client_limits,
+                             verify=self._client_verify,
+                             http2=self._client_http2,
+                             follow_redirects=True,  # make sure this is inherited
+                             proxy=httpx_proxy_config,  # set the proxy
+                         )
+                         effective_client = temp_client
+                         used_proxy = httpx_proxy_config  # record the proxy in use
+                         self.logger.debug(f"Using temporary client with proxy: {httpx_proxy_config} for {request.url}")
+                     except Exception as e:
+                         self.logger.error(
+                             f"Failed to create temporary client with proxy {httpx_proxy_config} for {request.url}: {e}")
+                         # On failure, fall back to the main client (no proxy)
+                         # Either raise or continue here
+                         # raise  # uncomment if a proxy failure should fail the request
+
+             # --- 5. Send the request (with fallback logic) ---
+             try:
+                 httpx_response = await effective_client.request(**kwargs)
+             except NETWORK_EXCEPTIONS as proxy_error:
+                 # --- Graceful fallback logic ---
+                 # If we just tried the proxy (temp_client) and it failed
+                 if temp_client is not None and effective_client is temp_client:
+                     # Log a warning
+                     self.logger.warning(
+                         f"Proxy request failed ({used_proxy}), retrying with a direct connection: {request.url} | error: {repr(proxy_error)}"
+                     )
+                     # Close the failed temporary client
+                     await temp_client.aclose()
+                     temp_client = None  # prevent finally from closing it again
+
+                     # Switch to the main client (direct connection)
+                     effective_client = self._client
+                     # Retry the request
+                     httpx_response = await effective_client.request(**kwargs)
+                 else:
+                     # If the main (direct) client failed, or it is not a network error, re-raise
+                     raise
+
+             # --- 6. Safety check: guard against oversized response bodies ---
+             content_length = httpx_response.headers.get("Content-Length")
+             if content_length and int(content_length) > self.max_download_size:
+                 await httpx_response.aclose()  # close the connection immediately to release resources
+                 raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
+
+             # --- 7. Read the response body ---
+             body = await httpx_response.aread()
+
+             # --- 8. Record download statistics ---
+             if start_time:
+                 download_time = time.time() - start_time
+                 self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
+
+             # --- 9. Build and return the Response ---
+             return self.structure_response(request=request, response=httpx_response, body=body)
+
+         except httpx.TimeoutException as e:
+             self.logger.error(f"Timeout error for {request.url}: {e}")
+             raise
+         except httpx.NetworkError as e:
+             self.logger.error(f"Network error for {request.url}: {e}")
+             raise
+         except httpx.HTTPStatusError as e:
+             self.logger.warning(f"HTTP {e.response.status_code} for {request.url}: {e}")
+             # Even for 4xx/5xx, return a Response and let the upper layers (e.g. the spider) handle it
+             # Uncomment the next line if raising here is needed
+             # raise
+             # Read the response body so structure_response can process it
+             try:
+                 error_body = await e.response.aread()
+             except Exception:
+                 error_body = b""  # empty if reading the error response body fails
+             return self.structure_response(request=request, response=e.response, body=error_body)
+         except Exception as e:
+             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
+             raise
+
+         finally:
+             # --- 10. Cleanup: close the temporary client ---
+             # If a temporary client was created, close it
+             if temp_client:
+                 try:
+                     await temp_client.aclose()
+                     # self.logger.debug("Closed temporary client.")
+                 except Exception as e:
+                     self.logger.warning(f"Error closing temporary client: {e}")
+
+     @staticmethod
+     def structure_response(request, response: httpx.Response, body: bytes) -> Response:
+         return Response(
+             url=str(response.url),  # httpx's URL is an object and must be converted to a string
+             headers=dict(response.headers),
+             status_code=response.status_code,  # note: use status_code
+             body=body,
+             request=request
+         )
+
+     async def close(self) -> None:
+         """Close the main client"""
+         if self._client:
+             self.logger.info("Closing HttpXDownloader client...")
+             await self._client.aclose()
+             self.logger.debug("HttpXDownloader closed.")
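
For reference, the downloader shown in this hunk is driven by a handful of crawlo settings plus the optional request.proxy attribute. The sketch below restates those knobs with the names and defaults that appear in the diffed code; presenting them as a standalone settings module is only an assumption for illustration, and the proxy URLs are hypothetical.

# Sketch: settings read by HttpXDownloader, defaults as they appear in the diff
DOWNLOAD_TIMEOUT = 30                  # total timeout; connect/write are fixed at 10s in open()
CONNECTION_POOL_LIMIT = 100            # becomes httpx Limits(max_connections=...)
CONNECTION_POOL_LIMIT_PER_HOST = 20    # becomes httpx Limits(max_keepalive_connections=...)
DOWNLOAD_MAXSIZE = 10 * 1024 * 1024    # a larger Content-Length raises OverflowError
VERIFY_SSL = True                      # TLS verification for both the main and temporary clients
DOWNLOAD_STATS = True                  # log per-request download time and body size
LOG_LEVEL = "INFO"                     # assumed value; the diff only shows the key being read

# Per-request proxying: request.proxy may be a URL string or a scheme-keyed dict.
# On a network error through the proxy, the downloader retries once over a direct connection.
PROXY_URL = "http://127.0.0.1:8080"                    # hypothetical proxy URL
PROXY_MAP = {"http": PROXY_URL, "https": PROXY_URL}    # hypothetical scheme-keyed mapping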