crawlo 1.1.2-py3-none-any.whl → 1.1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (113)
  1. crawlo/__init__.py +34 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/list.py +155 -155
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -196
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +186 -186
  12. crawlo/config.py +279 -279
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -171
  15. crawlo/core/enhanced_engine.py +189 -189
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +166 -162
  18. crawlo/crawler.py +1027 -1027
  19. crawlo/downloader/__init__.py +242 -242
  20. crawlo/downloader/aiohttp_downloader.py +212 -212
  21. crawlo/downloader/cffi_downloader.py +251 -251
  22. crawlo/downloader/httpx_downloader.py +259 -257
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +82 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -154
  30. crawlo/filters/aioredis_filter.py +242 -242
  31. crawlo/filters/memory_filter.py +269 -269
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -248
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -125
  45. crawlo/mode_manager.py +200 -200
  46. crawlo/network/__init__.py +21 -21
  47. crawlo/network/request.py +311 -311
  48. crawlo/network/response.py +271 -269
  49. crawlo/pipelines/__init__.py +22 -13
  50. crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
  51. crawlo/pipelines/console_pipeline.py +39 -39
  52. crawlo/pipelines/csv_pipeline.py +316 -316
  53. crawlo/pipelines/database_dedup_pipeline.py +225 -0
  54. crawlo/pipelines/json_pipeline.py +218 -218
  55. crawlo/pipelines/memory_dedup_pipeline.py +116 -0
  56. crawlo/pipelines/mongo_pipeline.py +116 -116
  57. crawlo/pipelines/mysql_pipeline.py +195 -195
  58. crawlo/pipelines/pipeline_manager.py +56 -56
  59. crawlo/pipelines/redis_dedup_pipeline.py +163 -0
  60. crawlo/project.py +153 -153
  61. crawlo/queue/pqueue.py +37 -37
  62. crawlo/queue/queue_manager.py +307 -303
  63. crawlo/queue/redis_priority_queue.py +208 -191
  64. crawlo/settings/__init__.py +7 -7
  65. crawlo/settings/default_settings.py +245 -226
  66. crawlo/settings/setting_manager.py +99 -99
  67. crawlo/spider/__init__.py +639 -639
  68. crawlo/stats_collector.py +59 -59
  69. crawlo/subscriber.py +106 -106
  70. crawlo/task_manager.py +30 -30
  71. crawlo/templates/crawlo.cfg.tmpl +10 -10
  72. crawlo/templates/project/__init__.py.tmpl +3 -3
  73. crawlo/templates/project/items.py.tmpl +17 -17
  74. crawlo/templates/project/middlewares.py.tmpl +86 -86
  75. crawlo/templates/project/pipelines.py.tmpl +341 -335
  76. crawlo/templates/project/run.py.tmpl +251 -238
  77. crawlo/templates/project/settings.py.tmpl +250 -247
  78. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  79. crawlo/templates/spider/spider.py.tmpl +177 -177
  80. crawlo/utils/__init__.py +7 -7
  81. crawlo/utils/controlled_spider_mixin.py +439 -335
  82. crawlo/utils/date_tools.py +233 -233
  83. crawlo/utils/db_helper.py +343 -343
  84. crawlo/utils/func_tools.py +82 -82
  85. crawlo/utils/large_scale_config.py +286 -286
  86. crawlo/utils/large_scale_helper.py +343 -343
  87. crawlo/utils/log.py +128 -128
  88. crawlo/utils/queue_helper.py +175 -175
  89. crawlo/utils/request.py +267 -267
  90. crawlo/utils/request_serializer.py +219 -219
  91. crawlo/utils/spider_loader.py +62 -62
  92. crawlo/utils/system.py +11 -11
  93. crawlo/utils/tools.py +4 -4
  94. crawlo/utils/url.py +39 -39
  95. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/METADATA +635 -567
  96. crawlo-1.1.3.dist-info/RECORD +113 -0
  97. examples/__init__.py +7 -7
  98. examples/controlled_spider_example.py +205 -0
  99. tests/__init__.py +7 -7
  100. tests/test_final_validation.py +153 -153
  101. tests/test_proxy_health_check.py +32 -32
  102. tests/test_proxy_middleware_integration.py +136 -136
  103. tests/test_proxy_providers.py +56 -56
  104. tests/test_proxy_stats.py +19 -19
  105. tests/test_proxy_strategies.py +59 -59
  106. tests/test_redis_config.py +28 -28
  107. tests/test_redis_queue.py +224 -224
  108. tests/test_request_serialization.py +70 -70
  109. tests/test_scheduler.py +241 -241
  110. crawlo-1.1.2.dist-info/RECORD +0 -108
  111. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
  112. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
  113. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
crawlo/downloader/httpx_downloader.py CHANGED
@@ -1,257 +1,259 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- import httpx
- from typing import Optional
- from httpx import AsyncClient, Timeout, Limits
-
- from crawlo.network.response import Response
- from crawlo.downloader import DownloaderBase
- from crawlo.utils.log import get_logger
-
- # Try to import the httpx exceptions so they can be caught precisely
- try:
-     # httpx 0.23.0+ moved the exceptions into _exceptions
-     from httpx import ConnectError, TimeoutException, NetworkError, HTTPStatusError
- except ImportError:
-     try:
-         # Older versions may expose them in httpcore or at the top level
-         from httpcore import ConnectError
-         from httpx import TimeoutException, NetworkError, HTTPStatusError
-     except ImportError:
-         ConnectError = httpx.ConnectError
-         TimeoutException = httpx.TimeoutException
-         NetworkError = httpx.NetworkError
-         HTTPStatusError = httpx.HTTPStatusError
-
- # Exceptions we treat as network problems that should trigger the fallback
- NETWORK_EXCEPTIONS = (ConnectError, TimeoutException, NetworkError)
-
- class HttpXDownloader(DownloaderBase):
-     """
-     High-performance asynchronous downloader built on httpx
-     - Uses a persistent AsyncClient (the recommended practice)
-     - Supports connection pooling, HTTP/2, and transparent proxies
-     - Smart handling of a Request's json_body and form_data
-     - Automatically falls back to a direct connection when the proxy fails
-     """
-
-     def __init__(self, crawler):
-         super().__init__(crawler)
-         self._client: Optional[AsyncClient] = None
-         self._client_timeout: Optional[Timeout] = None
-         self._client_limits: Optional[Limits] = None
-         self._client_verify: bool = True
-         self._client_http2: bool = False
-         self.max_download_size: Optional[int] = None
-         # ------------------------
-         self._timeout: Optional[Timeout] = None
-         self._limits: Optional[Limits] = None
-         # --- Obtain a logger instance ---
-         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
-
-     def open(self):
-         super().open()
-         self.logger.info("Opening HttpXDownloader")
-
-         # Read configuration
-         timeout_total = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
-         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
-         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
-         max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
-
-         # Save configuration
-         self.max_download_size = max_download_size
-
-         # --- Save the client configuration for reuse ---
-         self._client_timeout = Timeout(
-             connect=10.0,  # timeout for establishing a connection
-             read=timeout_total - 10.0 if timeout_total > 10 else timeout_total / 2,  # timeout for reading data
-             write=10.0,  # timeout for sending data
-             pool=1.0  # timeout for acquiring a connection from the pool
-         )
-         self._client_limits = Limits(
-             max_connections=pool_limit,
-             max_keepalive_connections=pool_per_host
-         )
-         self._client_verify = self.crawler.settings.get_bool("VERIFY_SSL", True)
-         self._client_http2 = True  # enable HTTP/2 support
-         # ----------------------------
-
-         # Create the persistent client (no global proxy is set here)
-         self._client = AsyncClient(
-             timeout=self._client_timeout,
-             limits=self._client_limits,
-             verify=self._client_verify,
-             http2=self._client_http2,
-             follow_redirects=True,  # follow redirects automatically
-             # Note: proxy/proxies are deliberately not set here
-         )
-
-         self.logger.debug("HttpXDownloader initialized.")
-
-     async def download(self, request) -> Optional[Response]:
-         """Download a request and return the response, degrading gracefully when the proxy fails"""
-         if not self._client:
-             raise RuntimeError("HttpXDownloader client is not available.")
-
-         start_time = None
-         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
-             import time
-             start_time = time.time()
-
-         # --- 1. Decide which client instance to use ---
-         effective_client = self._client  # default to the shared main client
-         temp_client = None  # holds a temporary client if one gets created
-         used_proxy = None  # records the proxy currently being tried
-
-         try:
-             # --- 2. Build the request kwargs (without proxy/proxies) ---
-             kwargs = {
-                 "method": request.method,
-                 "url": request.url,
-                 "headers": request.headers,
-                 "cookies": request.cookies,
-                 "follow_redirects": request.allow_redirects,
-             }
-
-             # Smart body handling (a key optimization)
-             if hasattr(request, "_json_body") and request._json_body is not None:
-                 kwargs["json"] = request._json_body  # let httpx handle serialization
-             elif isinstance(request.body, (dict, list)):
-                 kwargs["json"] = request.body
-             else:
-                 kwargs["content"] = request.body  # use content rather than data
-
-             # --- 3. Handle the proxy ---
-             httpx_proxy_config = None  # proxy configuration for initializing a temporary client
-             if request.proxy:
-                 # Prepare httpx's proxy argument according to the type of request.proxy
-                 if isinstance(request.proxy, str):
-                     # A plain proxy URL string
-                     httpx_proxy_config = request.proxy
-                 elif isinstance(request.proxy, dict):
-                     # Pick a suitable proxy URL from the dict:
-                     # prefer the one matching the request scheme, otherwise fall back to http
-                     from urllib.parse import urlparse
-                     request_scheme = urlparse(request.url).scheme
-                     if request_scheme == "https" and request.proxy.get("https"):
-                         httpx_proxy_config = request.proxy["https"]
-                     elif request.proxy.get("http"):
-                         httpx_proxy_config = request.proxy["http"]
-                     else:
-                         # If nothing matches, try any available entry
-                         httpx_proxy_config = next(iter(request.proxy.values()), None)
-                         if httpx_proxy_config:
-                             self.logger.warning(
-                                 f"No specific proxy for scheme '{request_scheme}', using '{httpx_proxy_config}'"
-                             )
-
-                 # If a proxy configuration was resolved, create a temporary client
-                 if httpx_proxy_config:
-                     try:
-                         # --- 4. Create a temporary client configured with the proxy ---
-                         # Reuse the configuration saved in open()
-                         temp_client = AsyncClient(
-                             timeout=self._client_timeout,
-                             limits=self._client_limits,
-                             verify=self._client_verify,
-                             http2=self._client_http2,
-                             follow_redirects=True,  # make sure this is inherited
-                             proxy=httpx_proxy_config,  # set the proxy
-                         )
-                         effective_client = temp_client
-                         used_proxy = httpx_proxy_config  # record the proxy in use
-                         self.logger.debug(f"Using temporary client with proxy: {httpx_proxy_config} for {request.url}")
-                     except Exception as e:
-                         self.logger.error(f"Failed to create temporary client with proxy {httpx_proxy_config} for {request.url}: {e}")
-                         # On error, fall back to the main client (no proxy)
-                         # Either raise or carry on here
-                         # raise  # uncomment if a proxy failure should fail the request
-
-             # --- 5. Send the request (with fallback logic) ---
-             try:
-                 httpx_response = await effective_client.request(**kwargs)
-             except NETWORK_EXCEPTIONS as proxy_error:
-                 # --- Graceful degradation ---
-                 # If we just tried a proxy (temp_client) and it failed
-                 if temp_client is not None and effective_client is temp_client:
-                     # Log a warning
-                     self.logger.warning(
-                         f"Proxy request failed ({used_proxy}), retrying with a direct connection: {request.url} | error: {repr(proxy_error)}"
-                     )
-                     # Close the failed temporary client
-                     await temp_client.aclose()
-                     temp_client = None  # prevent a second close in finally
-
-                     # Switch to the main client (direct connection)
-                     effective_client = self._client
-                     # Retry the request
-                     httpx_response = await effective_client.request(**kwargs)
-                 else:
-                     # If the main client (direct connection) failed, or this is not a network error, re-raise
-                     raise
-
-             # --- 6. Safety check: guard against oversized response bodies ---
-             content_length = httpx_response.headers.get("Content-Length")
-             if content_length and int(content_length) > self.max_download_size:
-                 await httpx_response.aclose()  # close immediately to release resources
-                 raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
-
-             # --- 7. Read the response body ---
-             body = await httpx_response.aread()
-
-             # --- 8. Record download statistics ---
-             if start_time:
-                 download_time = time.time() - start_time
-                 self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
-
-             # --- 9. Build and return the Response ---
-             return self.structure_response(request=request, response=httpx_response, body=body)
-
-         except httpx.TimeoutException as e:
-             self.logger.error(f"Timeout error for {request.url}: {e}")
-             raise
-         except httpx.NetworkError as e:
-             self.logger.error(f"Network error for {request.url}: {e}")
-             raise
-         except httpx.HTTPStatusError as e:
-             self.logger.warning(f"HTTP {e.response.status_code} for {request.url}: {e}")
-             # Even for 4xx/5xx, return a Response and let the upper layers (e.g. the spider) handle it
-             # To raise here instead, uncomment the next line
-             # raise
-             # Read the response body so structure_response can process it
-             try:
-                 error_body = await e.response.aread()
-             except Exception:
-                 error_body = b""  # empty if reading the error body also fails
-             return self.structure_response(request=request, response=e.response, body=error_body)
-         except Exception as e:
-             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
-             raise
-
-         finally:
-             # --- 10. Cleanup: close the temporary client ---
-             # If a temporary client was created, close it
-             if temp_client:
-                 try:
-                     await temp_client.aclose()
-                     # self.logger.debug("Closed temporary client.")
-                 except Exception as e:
-                     self.logger.warning(f"Error closing temporary client: {e}")
-
-     @staticmethod
-     def structure_response(request, response: httpx.Response, body: bytes) -> Response:
-         return Response(
-             url=str(response.url),  # httpx's URL is an object and must be converted to a string
-             headers=dict(response.headers),
-             status_code=response.status_code,  # note: uses status_code
-             body=body,
-             request=request
-         )
-
-     async def close(self) -> None:
-         """Close the main client"""
-         if self._client:
-             self.logger.info("Closing HttpXDownloader client...")
-             await self._client.aclose()
-             self.logger.debug("HttpXDownloader closed.")
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import httpx
+ from typing import Optional
+ from httpx import AsyncClient, Timeout, Limits
+
+ from crawlo.network.response import Response
+ from crawlo.downloader import DownloaderBase
+ from crawlo.utils.log import get_logger
+
+ # Try to import the httpx exceptions so they can be caught precisely
+ try:
+     # httpx 0.23.0+ moved the exceptions into _exceptions
+     from httpx import ConnectError, TimeoutException, NetworkError, HTTPStatusError
+ except ImportError:
+     try:
+         # Older versions may expose them in httpcore or at the top level
+         from httpcore import ConnectError
+         from httpx import TimeoutException, NetworkError, HTTPStatusError
+     except ImportError:
+         ConnectError = httpx.ConnectError
+         TimeoutException = httpx.TimeoutException
+         NetworkError = httpx.NetworkError
+         HTTPStatusError = httpx.HTTPStatusError
+
+ # Exceptions we treat as network problems that should trigger the fallback
+ NETWORK_EXCEPTIONS = (ConnectError, TimeoutException, NetworkError)
+
+
+ class HttpXDownloader(DownloaderBase):
+     """
+     High-performance asynchronous downloader built on httpx
+     - Uses a persistent AsyncClient (the recommended practice)
+     - Supports connection pooling, HTTP/2, and transparent proxies
+     - Smart handling of a Request's json_body and form_data
+     - Automatically falls back to a direct connection when the proxy fails
+     """
+
+     def __init__(self, crawler):
+         super().__init__(crawler)
+         self._client: Optional[AsyncClient] = None
+         self._client_timeout: Optional[Timeout] = None
+         self._client_limits: Optional[Limits] = None
+         self._client_verify: bool = True
+         self._client_http2: bool = False
+         self.max_download_size: Optional[int] = None
+         # ------------------------
+         self._timeout: Optional[Timeout] = None
+         self._limits: Optional[Limits] = None
+         # --- Obtain a logger instance ---
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
+
+     def open(self):
+         super().open()
+         self.logger.info("Opening HttpXDownloader")
+
+         # Read configuration
+         timeout_total = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
+         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
+         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
+         max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
+
+         # Save configuration
+         self.max_download_size = max_download_size
+
+         # --- Save the client configuration for reuse ---
+         self._client_timeout = Timeout(
+             connect=10.0,  # timeout for establishing a connection
+             read=timeout_total - 10.0 if timeout_total > 10 else timeout_total / 2,  # timeout for reading data
+             write=10.0,  # timeout for sending data
+             pool=1.0  # timeout for acquiring a connection from the pool
+         )
+         self._client_limits = Limits(
+             max_connections=pool_limit,
+             max_keepalive_connections=pool_per_host
+         )
+         self._client_verify = self.crawler.settings.get_bool("VERIFY_SSL", True)
+         self._client_http2 = True  # enable HTTP/2 support
+         # ----------------------------
+
+         # Create the persistent client (no global proxy is set here)
+         self._client = AsyncClient(
+             timeout=self._client_timeout,
+             limits=self._client_limits,
+             verify=self._client_verify,
+             http2=self._client_http2,
+             follow_redirects=True,  # follow redirects automatically
+             # Note: proxy/proxies are deliberately not set here
+         )
+
+         self.logger.debug("HttpXDownloader initialized.")
+
+     async def download(self, request) -> Optional[Response]:
+         """Download a request and return the response, degrading gracefully when the proxy fails"""
+         if not self._client:
+             raise RuntimeError("HttpXDownloader client is not available.")
+
+         start_time = None
+         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
+             import time
+             start_time = time.time()
+
+         # --- 1. Decide which client instance to use ---
+         effective_client = self._client  # default to the shared main client
+         temp_client = None  # holds a temporary client if one gets created
+         used_proxy = None  # records the proxy currently being tried
+
+         try:
+             # --- 2. Build the request kwargs (without proxy/proxies) ---
+             kwargs = {
+                 "method": request.method,
+                 "url": request.url,
+                 "headers": request.headers,
+                 "cookies": request.cookies,
+                 "follow_redirects": request.allow_redirects,
+             }
+
+             # Smart body handling (a key optimization)
+             if hasattr(request, "_json_body") and request._json_body is not None:
+                 kwargs["json"] = request._json_body  # let httpx handle serialization
+             elif isinstance(request.body, (dict, list)):
+                 kwargs["json"] = request.body
+             else:
+                 kwargs["content"] = request.body  # use content rather than data
+
+             # --- 3. Handle the proxy ---
+             httpx_proxy_config = None  # proxy configuration for initializing a temporary client
+             if request.proxy:
+                 # Prepare httpx's proxy argument according to the type of request.proxy
+                 if isinstance(request.proxy, str):
+                     # A plain proxy URL string
+                     httpx_proxy_config = request.proxy
+                 elif isinstance(request.proxy, dict):
+                     # Pick a suitable proxy URL from the dict:
+                     # prefer the one matching the request scheme, otherwise fall back to http
+                     from urllib.parse import urlparse
+                     request_scheme = urlparse(request.url).scheme
+                     if request_scheme == "https" and request.proxy.get("https"):
+                         httpx_proxy_config = request.proxy["https"]
+                     elif request.proxy.get("http"):
+                         httpx_proxy_config = request.proxy["http"]
+                     else:
+                         # If nothing matches, try any available entry
+                         httpx_proxy_config = next(iter(request.proxy.values()), None)
+                         if httpx_proxy_config:
+                             self.logger.warning(
+                                 f"No specific proxy for scheme '{request_scheme}', using '{httpx_proxy_config}'"
+                             )
+
+                 # If a proxy configuration was resolved, create a temporary client
+                 if httpx_proxy_config:
+                     try:
+                         # --- 4. Create a temporary client configured with the proxy ---
+                         # Reuse the configuration saved in open()
+                         temp_client = AsyncClient(
+                             timeout=self._client_timeout,
+                             limits=self._client_limits,
+                             verify=self._client_verify,
+                             http2=self._client_http2,
+                             follow_redirects=True,  # make sure this is inherited
+                             proxy=httpx_proxy_config,  # set the proxy
+                         )
+                         effective_client = temp_client
+                         used_proxy = httpx_proxy_config  # record the proxy in use
+                         self.logger.debug(f"Using temporary client with proxy: {httpx_proxy_config} for {request.url}")
+                     except Exception as e:
+                         self.logger.error(
+                             f"Failed to create temporary client with proxy {httpx_proxy_config} for {request.url}: {e}")
+                         # On error, fall back to the main client (no proxy)
+                         # Either raise or carry on here
+                         # raise  # uncomment if a proxy failure should fail the request
+
+             # --- 5. Send the request (with fallback logic) ---
+             try:
+                 httpx_response = await effective_client.request(**kwargs)
+             except NETWORK_EXCEPTIONS as proxy_error:
+                 # --- Graceful degradation ---
+                 # If we just tried a proxy (temp_client) and it failed
+                 if temp_client is not None and effective_client is temp_client:
+                     # Log a warning
+                     self.logger.warning(
+                         f"Proxy request failed ({used_proxy}), retrying with a direct connection: {request.url} | error: {repr(proxy_error)}"
+                     )
+                     # Close the failed temporary client
+                     await temp_client.aclose()
+                     temp_client = None  # prevent a second close in finally
+
+                     # Switch to the main client (direct connection)
+                     effective_client = self._client
+                     # Retry the request
+                     httpx_response = await effective_client.request(**kwargs)
+                 else:
+                     # If the main client (direct connection) failed, or this is not a network error, re-raise
+                     raise
+
+             # --- 6. Safety check: guard against oversized response bodies ---
+             content_length = httpx_response.headers.get("Content-Length")
+             if content_length and int(content_length) > self.max_download_size:
+                 await httpx_response.aclose()  # close immediately to release resources
+                 raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
+
+             # --- 7. Read the response body ---
+             body = await httpx_response.aread()
+
+             # --- 8. Record download statistics ---
+             if start_time:
+                 download_time = time.time() - start_time
+                 self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
+
+             # --- 9. Build and return the Response ---
+             return self.structure_response(request=request, response=httpx_response, body=body)
+
+         except httpx.TimeoutException as e:
+             self.logger.error(f"Timeout error for {request.url}: {e}")
+             raise
+         except httpx.NetworkError as e:
+             self.logger.error(f"Network error for {request.url}: {e}")
+             raise
+         except httpx.HTTPStatusError as e:
+             self.logger.warning(f"HTTP {e.response.status_code} for {request.url}: {e}")
+             # Even for 4xx/5xx, return a Response and let the upper layers (e.g. the spider) handle it
+             # To raise here instead, uncomment the next line
+             # raise
+             # Read the response body so structure_response can process it
+             try:
+                 error_body = await e.response.aread()
+             except Exception:
+                 error_body = b""  # empty if reading the error body also fails
+             return self.structure_response(request=request, response=e.response, body=error_body)
+         except Exception as e:
+             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
+             raise
+
+         finally:
+             # --- 10. Cleanup: close the temporary client ---
+             # If a temporary client was created, close it
+             if temp_client:
+                 try:
+                     await temp_client.aclose()
+                     # self.logger.debug("Closed temporary client.")
+                 except Exception as e:
+                     self.logger.warning(f"Error closing temporary client: {e}")
+
+     @staticmethod
+     def structure_response(request, response: httpx.Response, body: bytes) -> Response:
+         return Response(
+             url=str(response.url),  # httpx's URL is an object and must be converted to a string
+             headers=dict(response.headers),
+             status_code=response.status_code,  # note: uses status_code
+             body=body,
+             request=request
+         )
+
+     async def close(self) -> None:
+         """Close the main client"""
+         if self._client:
+             self.logger.info("Closing HttpXDownloader client...")
+             await self._client.aclose()
+             self.logger.debug("HttpXDownloader closed.")
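
The heart of this file is the degrade-to-direct logic in step 5: when request.proxy is set, a temporary AsyncClient is built for that proxy, and a network-level failure through it triggers one retry on the shared direct client. Below is a minimal, self-contained sketch of the same pattern in plain httpx rather than crawlo's classes; the proxy URL is a placeholder, and the proxy= keyword assumes a recent httpx release (the same keyword the downloader passes to AsyncClient).

import asyncio
import httpx

# Exceptions treated as transient, network-level failures (mirrors NETWORK_EXCEPTIONS above)
NETWORK_EXCEPTIONS = (httpx.ConnectError, httpx.TimeoutException, httpx.NetworkError)

# Granular timeouts and a bounded pool, in the spirit of open() above
TIMEOUT = httpx.Timeout(connect=10.0, read=20.0, write=10.0, pool=1.0)
LIMITS = httpx.Limits(max_connections=100, max_keepalive_connections=20)


async def fetch_with_proxy_fallback(url: str, proxy_url: str) -> httpx.Response:
    """Try the request through the proxy first; on a network error, retry directly."""
    try:
        async with httpx.AsyncClient(proxy=proxy_url, timeout=TIMEOUT, limits=LIMITS) as proxied:
            return await proxied.get(url)
    except NETWORK_EXCEPTIONS:
        # Proxy path failed; degrade gracefully to a direct connection
        async with httpx.AsyncClient(timeout=TIMEOUT, limits=LIMITS) as direct:
            return await direct.get(url)


async def main() -> None:
    # http://127.0.0.1:8080 is a placeholder proxy endpoint
    resp = await fetch_with_proxy_fallback("https://example.com", "http://127.0.0.1:8080")
    print(resp.status_code, len(resp.content))


if __name__ == "__main__":
    asyncio.run(main())

Unlike this sketch, the downloader keeps the direct client alive across requests for connection reuse; only the proxied client is created and closed per request.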
crawlo/event.py CHANGED
@@ -1,11 +1,11 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
-
- spider_error = "spider_error"
- spider_opened = "spider_open"
- spider_closed = "spider_closed"
- ignore_request = "ignore_request"
- request_scheduled = "request_scheduled"
- response_received = "request_received"
- item_successful = "item_successful"
- item_discard = "item_discard"
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+
+ spider_error = "spider_error"
+ spider_opened = "spider_open"
+ spider_closed = "spider_closed"
+ ignore_request = "ignore_request"
+ request_scheduled = "request_scheduled"
+ response_received = "request_received"
+ item_successful = "item_successful"
+ item_discard = "item_discard"
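
crawlo/event.py is otherwise unchanged, but note that two constants carry values that differ from their names (spider_opened is "spider_open" and response_received is "request_received"), so subscribers should always reference the constants rather than the raw strings. A minimal sketch of that usage follows, with a toy dispatcher standing in for crawlo's subscriber (hypothetical, not crawlo's actual API):

from collections import defaultdict
from typing import Any, Callable

# Toy event bus; crawlo's real subscriber is not part of this diff hunk.
_handlers: defaultdict = defaultdict(list)


def subscribe(event: str, handler: Callable[..., Any]) -> None:
    _handlers[event].append(handler)


def emit(event: str, **kwargs: Any) -> None:
    for handler in _handlers[event]:
        handler(**kwargs)


# Constants as defined in crawlo/event.py above
spider_opened = "spider_open"             # value differs from the constant's name
response_received = "request_received"    # likewise

subscribe(spider_opened, lambda **kw: print("spider opened:", kw))
emit(spider_opened, spider="example_spider")  # -> spider opened: {'spider': 'example_spider'}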