crawlo-1.1.3-py3-none-any.whl → crawlo-1.1.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo has been flagged as possibly problematic.

Files changed (118)
  1. crawlo/__init__.py +34 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/list.py +155 -155
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -196
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +186 -186
  12. crawlo/config.py +279 -279
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -171
  15. crawlo/core/enhanced_engine.py +189 -189
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +165 -165
  18. crawlo/crawler.py +1027 -1027
  19. crawlo/downloader/__init__.py +242 -242
  20. crawlo/downloader/aiohttp_downloader.py +212 -212
  21. crawlo/downloader/cffi_downloader.py +251 -251
  22. crawlo/downloader/httpx_downloader.py +259 -259
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +81 -81
  25. crawlo/extension/__init__.py +38 -31
  26. crawlo/extension/health_check.py +142 -0
  27. crawlo/extension/log_interval.py +58 -49
  28. crawlo/extension/log_stats.py +82 -44
  29. crawlo/extension/logging_extension.py +44 -35
  30. crawlo/extension/memory_monitor.py +89 -0
  31. crawlo/extension/performance_profiler.py +118 -0
  32. crawlo/extension/request_recorder.py +108 -0
  33. crawlo/filters/__init__.py +154 -154
  34. crawlo/filters/aioredis_filter.py +241 -241
  35. crawlo/filters/memory_filter.py +269 -269
  36. crawlo/items/__init__.py +23 -23
  37. crawlo/items/base.py +21 -21
  38. crawlo/items/fields.py +53 -53
  39. crawlo/items/items.py +104 -104
  40. crawlo/middleware/__init__.py +21 -21
  41. crawlo/middleware/default_header.py +32 -32
  42. crawlo/middleware/download_delay.py +28 -28
  43. crawlo/middleware/middleware_manager.py +135 -135
  44. crawlo/middleware/proxy.py +248 -248
  45. crawlo/middleware/request_ignore.py +30 -30
  46. crawlo/middleware/response_code.py +18 -18
  47. crawlo/middleware/response_filter.py +26 -26
  48. crawlo/middleware/retry.py +124 -124
  49. crawlo/mode_manager.py +200 -200
  50. crawlo/network/__init__.py +21 -21
  51. crawlo/network/request.py +311 -311
  52. crawlo/network/response.py +271 -271
  53. crawlo/pipelines/__init__.py +21 -21
  54. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  55. crawlo/pipelines/console_pipeline.py +39 -39
  56. crawlo/pipelines/csv_pipeline.py +316 -316
  57. crawlo/pipelines/database_dedup_pipeline.py +224 -224
  58. crawlo/pipelines/json_pipeline.py +218 -218
  59. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  60. crawlo/pipelines/mongo_pipeline.py +132 -117
  61. crawlo/pipelines/mysql_pipeline.py +317 -195
  62. crawlo/pipelines/pipeline_manager.py +56 -56
  63. crawlo/pipelines/redis_dedup_pipeline.py +162 -162
  64. crawlo/project.py +153 -153
  65. crawlo/queue/pqueue.py +37 -37
  66. crawlo/queue/queue_manager.py +307 -307
  67. crawlo/queue/redis_priority_queue.py +208 -208
  68. crawlo/settings/__init__.py +7 -7
  69. crawlo/settings/default_settings.py +278 -244
  70. crawlo/settings/setting_manager.py +99 -99
  71. crawlo/spider/__init__.py +639 -639
  72. crawlo/stats_collector.py +59 -59
  73. crawlo/subscriber.py +131 -106
  74. crawlo/task_manager.py +30 -30
  75. crawlo/templates/crawlo.cfg.tmpl +10 -10
  76. crawlo/templates/project/__init__.py.tmpl +3 -3
  77. crawlo/templates/project/items.py.tmpl +17 -17
  78. crawlo/templates/project/middlewares.py.tmpl +111 -87
  79. crawlo/templates/project/pipelines.py.tmpl +97 -341
  80. crawlo/templates/project/run.py.tmpl +251 -251
  81. crawlo/templates/project/settings.py.tmpl +279 -250
  82. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  83. crawlo/templates/spider/spider.py.tmpl +142 -178
  84. crawlo/utils/__init__.py +7 -7
  85. crawlo/utils/controlled_spider_mixin.py +439 -439
  86. crawlo/utils/date_tools.py +233 -233
  87. crawlo/utils/db_helper.py +343 -343
  88. crawlo/utils/func_tools.py +82 -82
  89. crawlo/utils/large_scale_config.py +286 -286
  90. crawlo/utils/large_scale_helper.py +343 -343
  91. crawlo/utils/log.py +128 -128
  92. crawlo/utils/queue_helper.py +175 -175
  93. crawlo/utils/request.py +267 -267
  94. crawlo/utils/request_serializer.py +219 -219
  95. crawlo/utils/spider_loader.py +62 -62
  96. crawlo/utils/system.py +11 -11
  97. crawlo/utils/tools.py +4 -4
  98. crawlo/utils/url.py +39 -39
  99. crawlo-1.1.4.dist-info/METADATA +403 -0
  100. crawlo-1.1.4.dist-info/RECORD +117 -0
  101. examples/__init__.py +7 -7
  102. examples/controlled_spider_example.py +205 -205
  103. tests/__init__.py +7 -7
  104. tests/test_final_validation.py +153 -153
  105. tests/test_proxy_health_check.py +32 -32
  106. tests/test_proxy_middleware_integration.py +136 -136
  107. tests/test_proxy_providers.py +56 -56
  108. tests/test_proxy_stats.py +19 -19
  109. tests/test_proxy_strategies.py +59 -59
  110. tests/test_redis_config.py +28 -28
  111. tests/test_redis_queue.py +224 -224
  112. tests/test_request_serialization.py +70 -70
  113. tests/test_scheduler.py +241 -241
  114. crawlo-1.1.3.dist-info/METADATA +0 -635
  115. crawlo-1.1.3.dist-info/RECORD +0 -113
  116. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/WHEEL +0 -0
  117. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/entry_points.txt +0 -0
  118. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/top_level.txt +0 -0
crawlo/downloader/httpx_downloader.py CHANGED
@@ -1,259 +1,259 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- import httpx
- from typing import Optional
- from httpx import AsyncClient, Timeout, Limits
-
- from crawlo.network.response import Response
- from crawlo.downloader import DownloaderBase
- from crawlo.utils.log import get_logger
-
- # Try to import httpx exceptions so they can be caught more precisely
- try:
-     # httpx 0.23.0+ moved the exceptions to _exceptions
-     from httpx import ConnectError, TimeoutException, NetworkError, HTTPStatusError
- except ImportError:
-     try:
-         # Older versions may expose them in httpcore or at the top level
-         from httpcore import ConnectError
-         from httpx import TimeoutException, NetworkError, HTTPStatusError
-     except ImportError:
-         ConnectError = httpx.ConnectError
-         TimeoutException = httpx.TimeoutException
-         NetworkError = httpx.NetworkError
-         HTTPStatusError = httpx.HTTPStatusError
-
- # Exceptions we treat as network problems that should trigger the fallback
- NETWORK_EXCEPTIONS = (ConnectError, TimeoutException, NetworkError)
-
-
- class HttpXDownloader(DownloaderBase):
-     """
-     High-performance asynchronous downloader built on httpx
-     - Uses a persistent AsyncClient (recommended practice)
-     - Supports connection pooling, HTTP/2, and transparent proxies
-     - Smart handling of the Request's json_body and form_data
-     - Automatically falls back to a direct connection when a proxy fails
-     """
-
-     def __init__(self, crawler):
-         super().__init__(crawler)
-         self._client: Optional[AsyncClient] = None
-         self._client_timeout: Optional[Timeout] = None
-         self._client_limits: Optional[Limits] = None
-         self._client_verify: bool = True
-         self._client_http2: bool = False
-         self.max_download_size: Optional[int] = None
-         # ------------------------
-         self._timeout: Optional[Timeout] = None
-         self._limits: Optional[Limits] = None
-         # --- Get a logger instance ---
-         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
-
-     def open(self):
-         super().open()
-         self.logger.info("Opening HttpXDownloader")
-
-         # Read the configuration
-         timeout_total = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
-         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
-         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
-         max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
-
-         # Store the configuration
-         self.max_download_size = max_download_size
-
-         # --- Save the client configuration for reuse ---
-         self._client_timeout = Timeout(
-             connect=10.0,  # connection establishment timeout
-             read=timeout_total - 10.0 if timeout_total > 10 else timeout_total / 2,  # read timeout
-             write=10.0,  # write timeout
-             pool=1.0  # timeout for acquiring a connection from the pool
-         )
-         self._client_limits = Limits(
-             max_connections=pool_limit,
-             max_keepalive_connections=pool_per_host
-         )
-         self._client_verify = self.crawler.settings.get_bool("VERIFY_SSL", True)
-         self._client_http2 = True  # enable HTTP/2 support
-         # ----------------------------
-
-         # Create the persistent client (no global proxy is set here)
-         self._client = AsyncClient(
-             timeout=self._client_timeout,
-             limits=self._client_limits,
-             verify=self._client_verify,
-             http2=self._client_http2,
-             follow_redirects=True,  # follow redirects automatically
-             # Note: no proxy or proxies configured here
-         )
-
-         self.logger.debug("HttpXDownloader initialized.")
-
-     async def download(self, request) -> Optional[Response]:
-         """Download a request and return the response, with graceful fallback when a proxy fails"""
-         if not self._client:
-             raise RuntimeError("HttpXDownloader client is not available.")
-
-         start_time = None
-         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
-             import time
-             start_time = time.time()
-
-         # --- 1. Decide which client instance to use ---
-         effective_client = self._client  # default to the shared main client
-         temp_client = None  # for a temporary client that may be created
-         used_proxy = None  # record the proxy currently being tried
-
-         try:
-             # --- 2. Build the request arguments (without proxy/proxies) ---
-             kwargs = {
-                 "method": request.method,
-                 "url": request.url,
-                 "headers": request.headers,
-                 "cookies": request.cookies,
-                 "follow_redirects": request.allow_redirects,
-             }
-
-             # Smart body handling (key optimization)
-             if hasattr(request, "_json_body") and request._json_body is not None:
-                 kwargs["json"] = request._json_body  # let httpx handle serialization
-             elif isinstance(request.body, (dict, list)):
-                 kwargs["json"] = request.body
-             else:
-                 kwargs["content"] = request.body  # use content rather than data
-
-             # --- 3. Handle the proxy ---
-             httpx_proxy_config = None  # proxy configuration for initializing a temporary client
-             if request.proxy:
-                 # Prepare httpx's proxy argument based on the type of request.proxy
-                 if isinstance(request.proxy, str):
-                     # A plain proxy URL string
-                     httpx_proxy_config = request.proxy
-                 elif isinstance(request.proxy, dict):
-                     # Pick a suitable proxy URL from the dict:
-                     # prefer one matching the request scheme, otherwise fall back to http
-                     from urllib.parse import urlparse
-                     request_scheme = urlparse(request.url).scheme
-                     if request_scheme == "https" and request.proxy.get("https"):
-                         httpx_proxy_config = request.proxy["https"]
-                     elif request.proxy.get("http"):
-                         httpx_proxy_config = request.proxy["http"]
-                     else:
-                         # If nothing matches, try any available entry
-                         httpx_proxy_config = next(iter(request.proxy.values()), None)
-                         if httpx_proxy_config:
-                             self.logger.warning(
-                                 f"No specific proxy for scheme '{request_scheme}', using '{httpx_proxy_config}'"
-                             )
-
-             # If a proxy configuration was resolved, create a temporary client
-             if httpx_proxy_config:
-                 try:
-                     # --- 4. Create a temporary client configured with the proxy ---
-                     # Reuse the configuration saved in open()
-                     temp_client = AsyncClient(
-                         timeout=self._client_timeout,
-                         limits=self._client_limits,
-                         verify=self._client_verify,
-                         http2=self._client_http2,
-                         follow_redirects=True,  # make sure this is inherited
-                         proxy=httpx_proxy_config,  # set the proxy
-                     )
-                     effective_client = temp_client
-                     used_proxy = httpx_proxy_config  # record the proxy in use
-                     self.logger.debug(f"Using temporary client with proxy: {httpx_proxy_config} for {request.url}")
-                 except Exception as e:
-                     self.logger.error(
-                         f"Failed to create temporary client with proxy {httpx_proxy_config} for {request.url}: {e}")
-                     # On error, fall back to the main client (no proxy)
-                     # You may choose to raise instead of continuing
-                     # raise  # uncomment if a proxy failure should fail the request
-
-             # --- 5. Send the request (with fallback logic) ---
-             try:
-                 httpx_response = await effective_client.request(**kwargs)
-             except NETWORK_EXCEPTIONS as proxy_error:
-                 # --- Graceful fallback logic ---
-                 # If we just tried a proxy (temp_client) and it failed
-                 if temp_client is not None and effective_client is temp_client:
-                     # Log a warning
-                     self.logger.warning(
-                         f"Proxy request failed ({used_proxy}), retrying with a direct connection: {request.url} | error: {repr(proxy_error)}"
-                     )
-                     # Close the failed temporary client
-                     await temp_client.aclose()
-                     temp_client = None  # prevent finally from closing it again
-
-                     # Switch to the main client (direct connection)
-                     effective_client = self._client
-                     # Try sending the request again
-                     httpx_response = await effective_client.request(**kwargs)
-                 else:
-                     # If the main client (direct connection) failed, or it is not a network error, re-raise
-                     raise
-
-             # --- 6. Safety check: guard against oversized response bodies ---
-             content_length = httpx_response.headers.get("Content-Length")
-             if content_length and int(content_length) > self.max_download_size:
-                 await httpx_response.aclose()  # close the connection immediately to free resources
-                 raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
-
-             # --- 7. Read the response body ---
-             body = await httpx_response.aread()
-
-             # --- 8. Record download statistics ---
-             if start_time:
-                 download_time = time.time() - start_time
-                 self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
-
-             # --- 9. Build and return the Response ---
-             return self.structure_response(request=request, response=httpx_response, body=body)
-
-         except httpx.TimeoutException as e:
-             self.logger.error(f"Timeout error for {request.url}: {e}")
-             raise
-         except httpx.NetworkError as e:
-             self.logger.error(f"Network error for {request.url}: {e}")
-             raise
-         except httpx.HTTPStatusError as e:
-             self.logger.warning(f"HTTP {e.response.status_code} for {request.url}: {e}")
-             # Even for 4xx/5xx, return a Response and let upper layers (e.g. the spider) handle it
-             # Uncomment the next line if raising here is preferred
-             # raise
-             # Read the response body so structure_response can process it
-             try:
-                 error_body = await e.response.aread()
-             except Exception:
-                 error_body = b""  # empty if reading the error body fails
-             return self.structure_response(request=request, response=e.response, body=error_body)
-         except Exception as e:
-             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
-             raise
-
-         finally:
-             # --- 10. Cleanup: close the temporary client ---
-             # If a temporary client was created, close it
-             if temp_client:
-                 try:
-                     await temp_client.aclose()
-                     # self.logger.debug("Closed temporary client.")
-                 except Exception as e:
-                     self.logger.warning(f"Error closing temporary client: {e}")
-
-     @staticmethod
-     def structure_response(request, response: httpx.Response, body: bytes) -> Response:
-         return Response(
-             url=str(response.url),  # httpx's URL is an object and must be converted to a string
-             headers=dict(response.headers),
-             status_code=response.status_code,  # note: uses status_code
-             body=body,
-             request=request
-         )
-
-     async def close(self) -> None:
-         """Close the main client"""
-         if self._client:
-             self.logger.info("Closing HttpXDownloader client...")
-             await self._client.aclose()
-             self.logger.debug("HttpXDownloader closed.")
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import httpx
+ from typing import Optional
+ from httpx import AsyncClient, Timeout, Limits
+
+ from crawlo.network.response import Response
+ from crawlo.downloader import DownloaderBase
+ from crawlo.utils.log import get_logger
+
+ # Try to import httpx exceptions so they can be caught more precisely
+ try:
+     # httpx 0.23.0+ moved the exceptions to _exceptions
+     from httpx import ConnectError, TimeoutException, NetworkError, HTTPStatusError
+ except ImportError:
+     try:
+         # Older versions may expose them in httpcore or at the top level
+         from httpcore import ConnectError
+         from httpx import TimeoutException, NetworkError, HTTPStatusError
+     except ImportError:
+         ConnectError = httpx.ConnectError
+         TimeoutException = httpx.TimeoutException
+         NetworkError = httpx.NetworkError
+         HTTPStatusError = httpx.HTTPStatusError
+
+ # Exceptions we treat as network problems that should trigger the fallback
+ NETWORK_EXCEPTIONS = (ConnectError, TimeoutException, NetworkError)
+
+
+ class HttpXDownloader(DownloaderBase):
+     """
+     High-performance asynchronous downloader built on httpx
+     - Uses a persistent AsyncClient (recommended practice)
+     - Supports connection pooling, HTTP/2, and transparent proxies
+     - Smart handling of the Request's json_body and form_data
+     - Automatically falls back to a direct connection when a proxy fails
+     """
+
+     def __init__(self, crawler):
+         super().__init__(crawler)
+         self._client: Optional[AsyncClient] = None
+         self._client_timeout: Optional[Timeout] = None
+         self._client_limits: Optional[Limits] = None
+         self._client_verify: bool = True
+         self._client_http2: bool = False
+         self.max_download_size: Optional[int] = None
+         # ------------------------
+         self._timeout: Optional[Timeout] = None
+         self._limits: Optional[Limits] = None
+         # --- Get a logger instance ---
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
+
+     def open(self):
+         super().open()
+         self.logger.info("Opening HttpXDownloader")
+
+         # Read the configuration
+         timeout_total = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
+         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
+         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
+         max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
+
+         # Store the configuration
+         self.max_download_size = max_download_size
+
+         # --- Save the client configuration for reuse ---
+         self._client_timeout = Timeout(
+             connect=10.0,  # connection establishment timeout
+             read=timeout_total - 10.0 if timeout_total > 10 else timeout_total / 2,  # read timeout
+             write=10.0,  # write timeout
+             pool=1.0  # timeout for acquiring a connection from the pool
+         )
+         self._client_limits = Limits(
+             max_connections=pool_limit,
+             max_keepalive_connections=pool_per_host
+         )
+         self._client_verify = self.crawler.settings.get_bool("VERIFY_SSL", True)
+         self._client_http2 = True  # enable HTTP/2 support
+         # ----------------------------
+
+         # Create the persistent client (no global proxy is set here)
+         self._client = AsyncClient(
+             timeout=self._client_timeout,
+             limits=self._client_limits,
+             verify=self._client_verify,
+             http2=self._client_http2,
+             follow_redirects=True,  # follow redirects automatically
+             # Note: no proxy or proxies configured here
+         )
+
+         self.logger.debug("HttpXDownloader initialized.")
+
+     async def download(self, request) -> Optional[Response]:
+         """Download a request and return the response, with graceful fallback when a proxy fails"""
+         if not self._client:
+             raise RuntimeError("HttpXDownloader client is not available.")
+
+         start_time = None
+         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
+             import time
+             start_time = time.time()
+
+         # --- 1. Decide which client instance to use ---
+         effective_client = self._client  # default to the shared main client
+         temp_client = None  # for a temporary client that may be created
+         used_proxy = None  # record the proxy currently being tried
+
+         try:
+             # --- 2. Build the request arguments (without proxy/proxies) ---
+             kwargs = {
+                 "method": request.method,
+                 "url": request.url,
+                 "headers": request.headers,
+                 "cookies": request.cookies,
+                 "follow_redirects": request.allow_redirects,
+             }
+
+             # Smart body handling (key optimization)
+             if hasattr(request, "_json_body") and request._json_body is not None:
+                 kwargs["json"] = request._json_body  # let httpx handle serialization
+             elif isinstance(request.body, (dict, list)):
+                 kwargs["json"] = request.body
+             else:
+                 kwargs["content"] = request.body  # use content rather than data
+
+             # --- 3. Handle the proxy ---
+             httpx_proxy_config = None  # proxy configuration for initializing a temporary client
+             if request.proxy:
+                 # Prepare httpx's proxy argument based on the type of request.proxy
+                 if isinstance(request.proxy, str):
+                     # A plain proxy URL string
+                     httpx_proxy_config = request.proxy
+                 elif isinstance(request.proxy, dict):
+                     # Pick a suitable proxy URL from the dict:
+                     # prefer one matching the request scheme, otherwise fall back to http
+                     from urllib.parse import urlparse
+                     request_scheme = urlparse(request.url).scheme
+                     if request_scheme == "https" and request.proxy.get("https"):
+                         httpx_proxy_config = request.proxy["https"]
+                     elif request.proxy.get("http"):
+                         httpx_proxy_config = request.proxy["http"]
+                     else:
+                         # If nothing matches, try any available entry
+                         httpx_proxy_config = next(iter(request.proxy.values()), None)
+                         if httpx_proxy_config:
+                             self.logger.warning(
+                                 f"No specific proxy for scheme '{request_scheme}', using '{httpx_proxy_config}'"
+                             )
+
+             # If a proxy configuration was resolved, create a temporary client
+             if httpx_proxy_config:
+                 try:
+                     # --- 4. Create a temporary client configured with the proxy ---
+                     # Reuse the configuration saved in open()
+                     temp_client = AsyncClient(
+                         timeout=self._client_timeout,
+                         limits=self._client_limits,
+                         verify=self._client_verify,
+                         http2=self._client_http2,
+                         follow_redirects=True,  # make sure this is inherited
+                         proxy=httpx_proxy_config,  # set the proxy
+                     )
+                     effective_client = temp_client
+                     used_proxy = httpx_proxy_config  # record the proxy in use
+                     self.logger.debug(f"Using temporary client with proxy: {httpx_proxy_config} for {request.url}")
+                 except Exception as e:
+                     self.logger.error(
+                         f"Failed to create temporary client with proxy {httpx_proxy_config} for {request.url}: {e}")
+                     # On error, fall back to the main client (no proxy)
+                     # You may choose to raise instead of continuing
+                     # raise  # uncomment if a proxy failure should fail the request
+
+             # --- 5. Send the request (with fallback logic) ---
+             try:
+                 httpx_response = await effective_client.request(**kwargs)
+             except NETWORK_EXCEPTIONS as proxy_error:
+                 # --- Graceful fallback logic ---
+                 # If we just tried a proxy (temp_client) and it failed
+                 if temp_client is not None and effective_client is temp_client:
+                     # Log a warning
+                     self.logger.warning(
+                         f"Proxy request failed ({used_proxy}), retrying with a direct connection: {request.url} | error: {repr(proxy_error)}"
+                     )
+                     # Close the failed temporary client
+                     await temp_client.aclose()
+                     temp_client = None  # prevent finally from closing it again
+
+                     # Switch to the main client (direct connection)
+                     effective_client = self._client
+                     # Try sending the request again
+                     httpx_response = await effective_client.request(**kwargs)
+                 else:
+                     # If the main client (direct connection) failed, or it is not a network error, re-raise
+                     raise
+
+             # --- 6. Safety check: guard against oversized response bodies ---
+             content_length = httpx_response.headers.get("Content-Length")
+             if content_length and int(content_length) > self.max_download_size:
+                 await httpx_response.aclose()  # close the connection immediately to free resources
+                 raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
+
+             # --- 7. Read the response body ---
+             body = await httpx_response.aread()
+
+             # --- 8. Record download statistics ---
+             if start_time:
+                 download_time = time.time() - start_time
+                 self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
+
+             # --- 9. Build and return the Response ---
+             return self.structure_response(request=request, response=httpx_response, body=body)
+
+         except httpx.TimeoutException as e:
+             self.logger.error(f"Timeout error for {request.url}: {e}")
+             raise
+         except httpx.NetworkError as e:
+             self.logger.error(f"Network error for {request.url}: {e}")
+             raise
+         except httpx.HTTPStatusError as e:
+             self.logger.warning(f"HTTP {e.response.status_code} for {request.url}: {e}")
+             # Even for 4xx/5xx, return a Response and let upper layers (e.g. the spider) handle it
+             # Uncomment the next line if raising here is preferred
+             # raise
+             # Read the response body so structure_response can process it
+             try:
+                 error_body = await e.response.aread()
+             except Exception:
+                 error_body = b""  # empty if reading the error body fails
+             return self.structure_response(request=request, response=e.response, body=error_body)
+         except Exception as e:
+             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
+             raise
+
+         finally:
+             # --- 10. Cleanup: close the temporary client ---
+             # If a temporary client was created, close it
+             if temp_client:
+                 try:
+                     await temp_client.aclose()
+                     # self.logger.debug("Closed temporary client.")
+                 except Exception as e:
+                     self.logger.warning(f"Error closing temporary client: {e}")
+
+     @staticmethod
+     def structure_response(request, response: httpx.Response, body: bytes) -> Response:
+         return Response(
+             url=str(response.url),  # httpx's URL is an object and must be converted to a string
+             headers=dict(response.headers),
+             status_code=response.status_code,  # note: uses status_code
+             body=body,
+             request=request
+         )
+
+     async def close(self) -> None:
+         """Close the main client"""
+         if self._client:
+             self.logger.info("Closing HttpXDownloader client...")
+             await self._client.aclose()
+             self.logger.debug("HttpXDownloader closed.")
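
For reference, the proxy fallback in step 5 of the file above reduces to a small standalone pattern: try the request through a short-lived proxied client first, and on a network-level error retry once over a direct connection. The sketch below is a minimal illustration under stated assumptions, not crawlo's actual downloader; it assumes a recent httpx release that accepts the `proxy=` keyword (the diffed code uses the same keyword), and the target and proxy URLs are placeholders.

import asyncio
from typing import Optional

import httpx

# Network-level errors that should trigger the direct-connection fallback,
# mirroring NETWORK_EXCEPTIONS in the diffed module.
NETWORK_EXCEPTIONS = (httpx.ConnectError, httpx.TimeoutException, httpx.NetworkError)


async def fetch_with_fallback(url: str, proxy: Optional[str] = None) -> httpx.Response:
    """Try the request through a proxy first; on a network error, retry directly."""
    if proxy:
        try:
            # Short-lived proxied client, analogous to temp_client above.
            async with httpx.AsyncClient(proxy=proxy, follow_redirects=True) as client:
                return await client.request("GET", url)
        except NETWORK_EXCEPTIONS as exc:
            print(f"Proxy {proxy} failed ({exc!r}); retrying with a direct connection")
    # Direct connection, analogous to the shared main client.
    async with httpx.AsyncClient(follow_redirects=True) as client:
        return await client.request("GET", url)


if __name__ == "__main__":
    # Placeholder URLs for illustration only.
    response = asyncio.run(fetch_with_fallback("https://example.com", proxy="http://127.0.0.1:8080"))
    print(response.status_code, len(response.content))

One design point the real downloader adds on top of this pattern: the direct-connection client stays persistent and only the proxied client is short-lived, so a proxy failure never tears down the shared connection pool.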
crawlo/event.py CHANGED
@@ -1,11 +1,11 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
-
- spider_error = "spider_error"
- spider_opened = "spider_open"
- spider_closed = "spider_closed"
- ignore_request = "ignore_request"
- request_scheduled = "request_scheduled"
- response_received = "request_received"
- item_successful = "item_successful"
- item_discard = "item_discard"
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+
+ spider_error = "spider_error"
+ spider_opened = "spider_open"
+ spider_closed = "spider_closed"
+ ignore_request = "ignore_request"
+ request_scheduled = "request_scheduled"
+ response_received = "request_received"
+ item_successful = "item_successful"
+ item_discard = "item_discard"
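
These events are plain module-level strings, so any string-keyed pub/sub can consume them. Note that some values differ from their variable names in the released file (spider_opened maps to "spider_open", response_received maps to "request_received"), so subscribers should reference the constants rather than hard-coding strings. The sketch below is a hypothetical dispatcher for illustration only; it is not crawlo's Subscriber API, which this diff does not show.

from collections import defaultdict
from typing import Any, Callable, DefaultDict, List

# Constants copied from crawlo/event.py above.
spider_opened = "spider_open"
spider_closed = "spider_closed"


class TinyDispatcher:
    """Minimal string-keyed event dispatcher (illustrative only)."""

    def __init__(self) -> None:
        self._handlers: DefaultDict[str, List[Callable[..., Any]]] = defaultdict(list)

    def subscribe(self, event: str, handler: Callable[..., Any]) -> None:
        self._handlers[event].append(handler)

    def emit(self, event: str, **kwargs: Any) -> None:
        for handler in self._handlers[event]:
            handler(**kwargs)


dispatcher = TinyDispatcher()
dispatcher.subscribe(spider_opened, lambda spider: print(f"{spider} opened"))
dispatcher.emit(spider_opened, spider="example_spider")  # prints "example_spider opened"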