crawlo 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (80)
  1. crawlo/__init__.py +9 -6
  2. crawlo/__version__.py +1 -2
  3. crawlo/core/__init__.py +2 -2
  4. crawlo/core/engine.py +158 -158
  5. crawlo/core/processor.py +40 -40
  6. crawlo/core/scheduler.py +57 -59
  7. crawlo/crawler.py +242 -107
  8. crawlo/downloader/__init__.py +78 -78
  9. crawlo/downloader/aiohttp_downloader.py +259 -96
  10. crawlo/downloader/httpx_downloader.py +187 -48
  11. crawlo/downloader/playwright_downloader.py +160 -160
  12. crawlo/event.py +11 -11
  13. crawlo/exceptions.py +64 -64
  14. crawlo/extension/__init__.py +31 -31
  15. crawlo/extension/log_interval.py +49 -49
  16. crawlo/extension/log_stats.py +44 -44
  17. crawlo/filters/__init__.py +37 -37
  18. crawlo/filters/aioredis_filter.py +157 -129
  19. crawlo/filters/memory_filter.py +202 -203
  20. crawlo/filters/redis_filter.py +119 -119
  21. crawlo/items/__init__.py +62 -62
  22. crawlo/items/items.py +118 -118
  23. crawlo/middleware/__init__.py +21 -21
  24. crawlo/middleware/default_header.py +32 -32
  25. crawlo/middleware/download_delay.py +28 -28
  26. crawlo/middleware/middleware_manager.py +140 -140
  27. crawlo/middleware/request_ignore.py +30 -30
  28. crawlo/middleware/response_code.py +18 -18
  29. crawlo/middleware/response_filter.py +26 -26
  30. crawlo/middleware/retry.py +90 -89
  31. crawlo/network/__init__.py +7 -7
  32. crawlo/network/request.py +205 -155
  33. crawlo/network/response.py +166 -93
  34. crawlo/pipelines/__init__.py +13 -13
  35. crawlo/pipelines/console_pipeline.py +39 -39
  36. crawlo/pipelines/mongo_pipeline.py +116 -116
  37. crawlo/pipelines/mysql_batch_pipline.py +133 -133
  38. crawlo/pipelines/mysql_pipeline.py +195 -176
  39. crawlo/pipelines/pipeline_manager.py +56 -56
  40. crawlo/settings/__init__.py +7 -7
  41. crawlo/settings/default_settings.py +93 -89
  42. crawlo/settings/setting_manager.py +99 -99
  43. crawlo/spider/__init__.py +36 -36
  44. crawlo/stats_collector.py +59 -47
  45. crawlo/subscriber.py +106 -27
  46. crawlo/task_manager.py +27 -27
  47. crawlo/templates/item_template.tmpl +21 -21
  48. crawlo/templates/project_template/main.py +32 -32
  49. crawlo/templates/project_template/setting.py +189 -189
  50. crawlo/templates/spider_template.tmpl +30 -30
  51. crawlo/utils/__init__.py +7 -7
  52. crawlo/utils/concurrency_manager.py +125 -0
  53. crawlo/utils/date_tools.py +177 -177
  54. crawlo/utils/func_tools.py +82 -82
  55. crawlo/utils/log.py +39 -39
  56. crawlo/utils/pqueue.py +173 -173
  57. crawlo/utils/project.py +59 -59
  58. crawlo/utils/request.py +122 -85
  59. crawlo/utils/system.py +11 -11
  60. crawlo/utils/tools.py +303 -0
  61. crawlo/utils/url.py +39 -39
  62. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/METADATA +48 -36
  63. crawlo-1.0.3.dist-info/RECORD +80 -0
  64. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/top_level.txt +1 -0
  65. tests/__init__.py +7 -0
  66. tests/baidu_spider/__init__.py +7 -0
  67. tests/baidu_spider/demo.py +94 -0
  68. tests/baidu_spider/items.py +25 -0
  69. tests/baidu_spider/middleware.py +49 -0
  70. tests/baidu_spider/pipeline.py +55 -0
  71. tests/baidu_spider/request_fingerprints.txt +9 -0
  72. tests/baidu_spider/run.py +27 -0
  73. tests/baidu_spider/settings.py +78 -0
  74. tests/baidu_spider/spiders/__init__.py +7 -0
  75. tests/baidu_spider/spiders/bai_du.py +61 -0
  76. tests/baidu_spider/spiders/sina.py +79 -0
  77. crawlo-1.0.1.dist-info/RECORD +0 -67
  78. crawlo-1.0.1.dist-info/licenses/LICENSE +0 -23
  79. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/WHEEL +0 -0
  80. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/entry_points.txt +0 -0
@@ -1,96 +1,259 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from typing import Optional
- from aiohttp import ClientSession, TCPConnector, BaseConnector, ClientTimeout, ClientResponse, TraceConfig
-
- from crawlo import Response
- from crawlo.downloader import DownloaderBase
-
-
- class AioHttpDownloader(DownloaderBase):
-     def __init__(self, crawler):
-         super().__init__(crawler)
-         self.session: Optional[ClientSession] = None
-         self.connector: Optional[BaseConnector] = None
-         self._verify_ssl: Optional[bool] = None
-         self._timeout: Optional[ClientTimeout] = None
-         self._use_session: Optional[bool] = None
-         self.trace_config: Optional[TraceConfig] = None
-
-         self.request_method = {
-             "get": self._get,
-             "post": self._post
-         }
-
-     def open(self):
-         super().open()
-         self._timeout = ClientTimeout(total=self.crawler.settings.get_int("DOWNLOAD_TIMEOUT"))
-         self._verify_ssl = self.crawler.settings.get_bool("VERIFY_SSL")
-         self._use_session = self.crawler.settings.get_bool("USE_SESSION")
-         self.trace_config = TraceConfig()
-         self.trace_config.on_request_start.append(self.request_start)
-         if self._use_session:
-             self.connector = TCPConnector(verify_ssl=self._verify_ssl)
-             self.session = ClientSession(
-                 connector=self.connector, timeout=self._timeout, trace_configs=[self.trace_config]
-             )
-
-     async def download(self, request) -> Optional[Response]:
-         try:
-             if self._use_session:
-                 response = await self.send_request(self.session, request)
-                 body = await response.content.read()
-             else:
-                 connector = TCPConnector(verify_ssl=self._verify_ssl)
-                 async with ClientSession(
-                     connector=connector, timeout=self._timeout, trace_configs=[self.trace_config]
-                 ) as session:
-                     response = await self.send_request(session, request)
-                     body = await response.content.read()
-         except Exception as exp:
-             self.logger.error(f"Error downloading {request}: {exp}")
-             raise exp
-
-         return self.structure_response(request=request, response=response, body=body)
-
-     @staticmethod
-     def structure_response(request, response, body):
-         return Response(
-             url=response.url,
-             headers=dict(response.headers),
-             status_code=response.status,
-             body=body,
-             request=request
-         )
-
-     async def send_request(self, session, request) -> ClientResponse:
-         return await self.request_method[request.method.lower()](session, request)
-
-     @staticmethod
-     async def _get(session, request) -> ClientResponse:
-         response = await session.get(
-             request.url,
-             headers=request.headers,
-             cookies=request.cookies
-         )
-         return response
-
-     @staticmethod
-     async def _post(session, request) -> ClientResponse:
-         response = await session.post(
-             request.url,
-             data=request.body,
-             headers=request.headers,
-             cookies=request.cookies,
-             proxy=request.proxy,
-         )
-         return response
-
-     async def request_start(self, _session, _trace_config_ctx, params):
-         self.logger.debug(f"Request start: {params.url}, method:{params.method}")
-
-     async def close(self) -> None:
-         if self.connector:
-             await self.connector.close()
-         if self.session:
-             await self.session.close()
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ from typing import Optional
+ from aiohttp import (
+     ClientSession,
+     TCPConnector,
+     ClientTimeout,
+     TraceConfig,
+     ClientResponse, ClientError,
+ )
+
+ from crawlo import Response
+ from crawlo.downloader import DownloaderBase
+
+
+ class AioHttpDownloader(DownloaderBase):
+     """
+     High-performance asynchronous downloader
+     - Built on a persistent ClientSession
+     - Understands the Request's high-level semantics (json_body/form_data)
+     - Supports GET/POST/PUT/DELETE and other HTTP methods
+     - Guards against oversized responses (memory safety)
+     """
+
+     def __init__(self, crawler):
+         super().__init__(crawler)
+         self.session: Optional[ClientSession] = None
+         self.max_download_size: int = 0
+
+     def open(self):
+         super().open()
+         self.logger.info("Opening AioHttpDownloader")
+
+         # Read settings
+         timeout_secs = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
+         verify_ssl = self.crawler.settings.get_bool("VERIFY_SSL", True)
+         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
+         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
+         self.max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
+
+         # Create the connector
+         connector = TCPConnector(
+             verify_ssl=verify_ssl,
+             limit=pool_limit,
+             limit_per_host=pool_per_host,
+             ttl_dns_cache=300,
+             keepalive_timeout=15,
+             force_close=False,
+         )
+
+         # Timeout control
+         timeout = ClientTimeout(total=timeout_secs)
+
+         # Request tracing
+         trace_config = TraceConfig()
+         trace_config.on_request_start.append(self._on_request_start)
+         trace_config.on_request_end.append(self._on_request_end)
+         trace_config.on_request_exception.append(self._on_request_exception)
+
+         # Create the global session
+         self.session = ClientSession(
+             connector=connector,
+             timeout=timeout,
+             trace_configs=[trace_config],
+             auto_decompress=True,
+         )
+
+         self.logger.debug("AioHttpDownloader initialized.")
+
+     async def download(self, request) -> Optional[Response]:
+         if not self.session or self.session.closed:
+             raise RuntimeError("AioHttpDownloader session is not open.")
+
+         try:
+             # Generic send logic (supports every HTTP method)
+             async with await self._send_request(self.session, request) as resp:
+                 # Safety check: avoid OOM caused by oversized response bodies
+                 content_length = resp.headers.get("Content-Length")
+                 if content_length and int(content_length) > self.max_download_size:
+                     raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
+
+                 body = await resp.read()
+                 return self._structure_response(request, resp, body)
+
+         except ClientError as e:
+             self.logger.error(f"Client error for {request.url}: {e}")
+             raise
+         except Exception as e:
+             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
+             raise
+
+     @staticmethod
+     async def _send_request(session: ClientSession, request) -> ClientResponse:
+         """
+         Send the request according to its method and high-level semantics.
+         Uses aiohttp's built-in methods (.get/.post/...) to avoid duplicated code.
+         """
+         method = request.method.lower()
+         if not hasattr(session, method):
+             raise ValueError(f"Unsupported HTTP method: {request.method}")
+
+         method_func = getattr(session, method)
+
+         # Build the keyword arguments
+         kwargs = {
+             "headers": request.headers,
+             "cookies": request.cookies,
+             "proxy": request.proxy,
+             "allow_redirects": request.allow_redirects,
+         }
+
+         # Key optimisation: if the original request used json_body, send it via json=
+         if hasattr(request, "_json_body") and request._json_body is not None:
+             kwargs["json"] = request._json_body  # let aiohttp handle serialisation and Content-Type
+         elif isinstance(request.body, (dict, list)):
+             # Backwards compatible with the old body=dict style
+             kwargs["json"] = request.body
+         else:
+             # Everything else (form data, bytes, str) goes through data=
+             if request.body is not None:
+                 kwargs["data"] = request.body
+
+         return await method_func(request.url, **kwargs)
+
+     @staticmethod
+     def _structure_response(request, resp: ClientResponse, body: bytes) -> Response:
+         """Build the Response object expected by the framework."""
+         return Response(
+             url=str(resp.url),
+             headers=dict(resp.headers),
+             status_code=resp.status,
+             body=body,
+             request=request,
+         )
+
+     # --- Request tracing callbacks ---
+     async def _on_request_start(self, session, trace_config_ctx, params):
+         """Callback invoked when a request starts."""
+         self.logger.debug(f"Requesting: {params.method} {params.url}")
+
+     async def _on_request_end(self, session, trace_config_ctx, params):
+         """Callback invoked when a request finishes successfully."""
+         # The response object comes directly from params
+         response = params.response
+         self.logger.debug(
+             f"Finished: {params.method} {params.url} with status {response.status}"
+         )
+
+     async def _on_request_exception(self, session, trace_config_ctx, params):
+         """Callback invoked when a request raises an exception."""
+         # The raised exception is available on params.exception
+         exc = params.exception
+         self.logger.warning(
+             f"Failed: {params.method} {params.url} with exception {type(exc).__name__}: {exc}"
+         )
+
+     async def close(self) -> None:
+         """Close the session and release resources."""
+         if self.session and not self.session.closed:
+             self.logger.info("Closing AioHttpDownloader session...")
+             await self.session.close()
+         self.logger.debug("AioHttpDownloader closed.")
+
+ # #!/usr/bin/python
+ # # -*- coding:UTF-8 -*-
+ # from typing import Optional
+ # from aiohttp import ClientSession, TCPConnector, BaseConnector, ClientTimeout, ClientResponse, TraceConfig
+ #
+ # from crawlo import Response
+ # from crawlo.downloader import DownloaderBase
+ #
+ #
+ # class AioHttpDownloader(DownloaderBase):
+ #     def __init__(self, crawler):
+ #         super().__init__(crawler)
+ #         self.session: Optional[ClientSession] = None
+ #         self.connector: Optional[BaseConnector] = None
+ #         self._verify_ssl: Optional[bool] = None
+ #         self._timeout: Optional[ClientTimeout] = None
+ #         self._use_session: Optional[bool] = None
+ #         self.trace_config: Optional[TraceConfig] = None
+ #
+ #         self.request_method = {
+ #             "get": self._get,
+ #             "post": self._post
+ #         }
+ #
+ #     def open(self):
+ #         super().open()
+ #         self._timeout = ClientTimeout(total=self.crawler.settings.get_int("DOWNLOAD_TIMEOUT"))
+ #         self._verify_ssl = self.crawler.settings.get_bool("VERIFY_SSL")
+ #         self._use_session = self.crawler.settings.get_bool("USE_SESSION")
+ #         self.trace_config = TraceConfig()
+ #         self.trace_config.on_request_start.append(self.request_start)
+ #         if self._use_session:
+ #             self.connector = TCPConnector(verify_ssl=self._verify_ssl)
+ #             self.session = ClientSession(
+ #                 connector=self.connector, timeout=self._timeout, trace_configs=[self.trace_config]
+ #             )
+ #
+ #     async def download(self, request) -> Optional[Response]:
+ #         try:
+ #             if self._use_session:
+ #                 response = await self.send_request(self.session, request)
+ #                 body = await response.content.read()
+ #             else:
+ #                 connector = TCPConnector(verify_ssl=self._verify_ssl)
+ #                 async with ClientSession(
+ #                     connector=connector, timeout=self._timeout, trace_configs=[self.trace_config]
+ #                 ) as session:
+ #                     response = await self.send_request(session, request)
+ #                     body = await response.content.read()
+ #         except Exception as exp:
+ #             self.logger.error(f"Error downloading {request}: {exp}")
+ #             raise exp
+ #
+ #         return self.structure_response(request=request, response=response, body=body)
+ #
+ #     @staticmethod
+ #     def structure_response(request, response, body):
+ #         return Response(
+ #             url=response.url,
+ #             headers=dict(response.headers),
+ #             status_code=response.status,
+ #             body=body,
+ #             request=request
+ #         )
+ #
+ #     async def send_request(self, session, request) -> ClientResponse:
+ #         return await self.request_method[request.method.lower()](session, request)
+ #
+ #     @staticmethod
+ #     async def _get(session, request) -> ClientResponse:
+ #         response = await session.get(
+ #             request.url,
+ #             headers=request.headers,
+ #             cookies=request.cookies
+ #         )
+ #         return response
+ #
+ #     @staticmethod
+ #     async def _post(session, request) -> ClientResponse:
+ #         response = await session.post(
+ #             request.url,
+ #             data=request.body,
+ #             headers=request.headers,
+ #             cookies=request.cookies,
+ #             proxy=request.proxy,
+ #         )
+ #         return response
+ #
+ #     async def request_start(self, _session, _trace_config_ctx, params):
+ #         self.logger.debug(f"Request start: {params.url}, method:{params.method}")
+ #
+ #     async def close(self) -> None:
+ #         if self.connector:
+ #             await self.connector.close()
+ #         if self.session:
+ #             await self.session.close()
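
The main behavioural change in crawlo/downloader/aiohttp_downloader.py is the body dispatch in _send_request: a request carrying _json_body (or a dict/list body) goes out through aiohttp's json= keyword, everything else falls back to data=. The sketch below replays that dispatch in isolation; it is not crawlo code, and the SimpleNamespace stand-in with its attribute names is an assumption chosen to mirror the fields used in the hunk above.

    import asyncio
    from types import SimpleNamespace

    import aiohttp


    def build_kwargs(request):
        """Rebuild the keyword arguments the way _send_request does above."""
        kwargs = {
            "headers": request.headers,
            "cookies": request.cookies,
            "proxy": request.proxy,
            "allow_redirects": request.allow_redirects,
        }
        if getattr(request, "_json_body", None) is not None:
            kwargs["json"] = request._json_body   # aiohttp serialises and sets Content-Type
        elif isinstance(request.body, (dict, list)):
            kwargs["json"] = request.body         # legacy body=dict style
        elif request.body is not None:
            kwargs["data"] = request.body         # form data, bytes or str
        return kwargs


    async def main():
        # Hypothetical request object; the attribute names mirror the hunk above.
        request = SimpleNamespace(
            url="https://httpbin.org/post", method="POST",
            headers=None, cookies=None, proxy=None, allow_redirects=True,
            body=None, _json_body={"q": "crawlo"},
        )
        async with aiohttp.ClientSession() as session:
            send = getattr(session, request.method.lower())
            async with send(request.url, **build_kwargs(request)) as resp:
                print(resp.status, len(await resp.read()))


    asyncio.run(main())

With _json_body set, aiohttp adds the application/json Content-Type itself; a bytes or str body is sent unchanged via data= and the caller keeps control of the headers.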
@@ -1,48 +1,187 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from typing import Optional
- from httpx import AsyncClient, Timeout
-
- from crawlo import Response
- from crawlo.downloader import DownloaderBase
-
-
- class HttpXDownloader(DownloaderBase):
-     def __init__(self, crawler):
-         super().__init__(crawler)
-         self._client: Optional[AsyncClient] = None
-         self._timeout: Optional[Timeout] = None
-
-     def open(self):
-         super().open()
-         timeout = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT")
-         self._timeout = Timeout(timeout=timeout)
-
-     async def download(self, request) -> Optional[Response]:
-         try:
-             proxies = None
-             async with AsyncClient(timeout=self._timeout, proxy=proxies) as client:
-                 self.logger.debug(f"request downloading: {request.url},method: {request.method}")
-                 response = await client.request(
-                     url=request.url,
-                     method=request.method,
-                     headers=request.headers,
-                     cookies=request.cookies,
-                     data=request.body
-                 )
-                 body = await response.aread()
-         except Exception as exp:
-             self.logger.error(f"Error downloading {request}: {exp}")
-             raise exp
-
-         return self.structure_response(request=request, response=response, body=body)
-
-     @staticmethod
-     def structure_response(request, response, body) -> Response:
-         return Response(
-             url=response.url,
-             headers=dict(response.headers),
-             status_code=response.status_code,
-             body=body,
-             request=request
-         )
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import httpx
+ from typing import Optional
+ from httpx import AsyncClient, Timeout, Limits
+
+ from crawlo import Response
+ from crawlo.downloader import DownloaderBase
+
+
+ class HttpXDownloader(DownloaderBase):
+     """
+     High-performance asynchronous downloader based on httpx
+     - Uses a persistent AsyncClient (the recommended approach)
+     - Supports connection pooling, HTTP/2 and transparent proxies
+     - Handles the Request's json_body and form_data intelligently
+     """
+
+     def __init__(self, crawler):
+         super().__init__(crawler)
+         self._client: Optional[AsyncClient] = None
+         self._timeout: Optional[Timeout] = None
+         self._limits: Optional[Limits] = None
+
+     def open(self):
+         super().open()
+         self.logger.info("Opening HttpXDownloader")
+
+         # Read settings
+         timeout_total = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
+         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
+         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
+         max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
+
+         # Keep the configured limit
+         self.max_download_size = max_download_size
+
+         # Fine-grained timeouts
+         self._timeout = Timeout(
+             connect=10.0,  # connection establishment timeout
+             read=timeout_total - 10.0 if timeout_total > 10 else timeout_total / 2,  # read timeout
+             write=10.0,  # write timeout
+             pool=1.0  # timeout for acquiring a connection from the pool
+         )
+
+         # Connection-pool limits
+         self._limits = Limits(
+             max_connections=pool_limit,
+             max_keepalive_connections=pool_per_host
+         )
+
+         # Create the persistent client
+         # verify=False corresponds to VERIFY_SSL=False
+         verify_ssl = self.crawler.settings.get_bool("VERIFY_SSL", True)
+
+         self._client = AsyncClient(
+             timeout=self._timeout,
+             limits=self._limits,
+             verify=verify_ssl,
+             http2=True,  # enable HTTP/2 support
+             follow_redirects=True,  # follow redirects automatically
+         )
+
+         self.logger.debug("HttpXDownloader initialized.")
+
+     async def download(self, request) -> Optional[Response]:
+         if not self._client:
+             raise RuntimeError("HttpXDownloader client is not available.")
+
+         try:
+             # Build the request arguments
+             kwargs = {
+                 "url": request.url,
+                 "headers": request.headers,
+                 "cookies": request.cookies,
+                 "follow_redirects": request.allow_redirects,
+             }
+
+             # Smart body handling (the key optimisation)
+             if hasattr(request, "_json_body") and request._json_body is not None:
+                 kwargs["json"] = request._json_body  # let httpx handle serialisation
+             elif isinstance(request.body, (dict, list)):
+                 kwargs["json"] = request.body
+             else:
+                 kwargs["content"] = request.body  # use content rather than data
+
+             # Set the proxy
+             if request.proxy:
+                 kwargs["proxy"] = request.proxy
+
+             # Send the request
+             response = await self._client.request(
+                 method=request.method,
+                 **kwargs
+             )
+
+             # Safety check: guard against oversized response bodies
+             content_length = response.headers.get("Content-Length")
+             if content_length and int(content_length) > self.max_download_size:
+                 await response.aclose()  # close immediately and release the connection
+                 raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
+
+             # Read the response body
+             body = await response.aread()
+
+             return self.structure_response(request=request, response=response, body=body)
+
+         except httpx.TimeoutException as e:
+             self.logger.error(f"Timeout error for {request.url}: {e}")
+             raise
+         except httpx.NetworkError as e:
+             self.logger.error(f"Network error for {request.url}: {e}")
+             raise
+         except httpx.HTTPStatusError as e:
+             self.logger.warning(f"HTTP {e.response.status_code} for {request.url}: {e}")
+             # Even for 4xx/5xx responses, return a Response and let the upper layer (e.g. the spider) decide
+             # Uncomment the next line to raise here instead
+             # raise
+             return self.structure_response(request=request, response=e.response, body=b"")
+         except Exception as e:
+             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
+             raise
+
+     @staticmethod
+     def structure_response(request, response, body: bytes) -> Response:
+         return Response(
+             url=str(response.url),  # httpx URLs are objects and must be converted to str
+             headers=dict(response.headers),
+             status_code=response.status_code,
+             body=body,
+             request=request
+         )
+
+     async def close(self) -> None:
+         """Close the client."""
+         if self._client:
+             self.logger.info("Closing HttpXDownloader client...")
+             await self._client.aclose()
+         self.logger.debug("HttpXDownloader closed.")
+ # #!/usr/bin/python
+ # # -*- coding:UTF-8 -*-
+ # from typing import Optional
+ # from httpx import AsyncClient, Timeout
+ #
+ # from crawlo import Response
+ # from crawlo.downloader import DownloaderBase
+ #
+ #
+ # class HttpXDownloader(DownloaderBase):
+ #     def __init__(self, crawler):
+ #         super().__init__(crawler)
+ #         self._client: Optional[AsyncClient] = None
+ #         self._timeout: Optional[Timeout] = None
+ #
+ #     def open(self):
+ #         super().open()
+ #         timeout = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT")
+ #         self._timeout = Timeout(timeout=timeout)
+ #
+ #     async def download(self, request) -> Optional[Response]:
+ #         try:
+ #             proxies = None
+ #             async with AsyncClient(timeout=self._timeout, proxy=proxies) as client:
+ #                 self.logger.debug(f"request downloading: {request.url},method: {request.method}")
+ #                 response = await client.request(
+ #                     url=request.url,
+ #                     method=request.method,
+ #                     headers=request.headers,
+ #                     cookies=request.cookies,
+ #                     data=request.body
+ #                 )
+ #                 body = await response.aread()
+ #         except Exception as exp:
+ #             self.logger.error(f"Error downloading {request}: {exp}")
+ #             raise exp
+ #
+ #         return self.structure_response(request=request, response=response, body=body)
+ #
+ #     @staticmethod
+ #     def structure_response(request, response, body) -> Response:
+ #         return Response(
+ #             url=response.url,
+ #             headers=dict(response.headers),
+ #             status_code=response.status_code,
+ #             body=body,
+ #             request=request
+ #         )
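
The httpx rewrite follows the same pattern: one long-lived AsyncClient configured with explicit Timeout and Limits, plus a Content-Length guard before the body is handed on. Below is a minimal sketch using the values the settings above default to (a 30 s budget split as connect=10, read=20, write=10, pool=1; 100/20 pool limits; 10 MB cap); the URL and the fetch helper are placeholders, not crawlo APIs.

    import asyncio

    import httpx

    MAX_DOWNLOAD_SIZE = 10 * 1024 * 1024  # mirrors the DOWNLOAD_MAXSIZE default above


    async def fetch(url: str) -> bytes:
        timeout = httpx.Timeout(connect=10.0, read=20.0, write=10.0, pool=1.0)
        limits = httpx.Limits(max_connections=100, max_keepalive_connections=20)
        async with httpx.AsyncClient(timeout=timeout, limits=limits,
                                     follow_redirects=True) as client:
            response = await client.request("GET", url)
            content_length = response.headers.get("Content-Length")
            if content_length and int(content_length) > MAX_DOWNLOAD_SIZE:
                await response.aclose()  # release the connection before bailing out
                raise OverflowError(f"Response too large: {content_length}")
            return await response.aread()


    print(len(asyncio.run(fetch("https://www.example.com"))))

The diff also enables http2=True, which additionally requires the h2 package (pip install httpx[http2]). Note that a plain client.request() has already buffered the body by the time the size check runs, so the guard (here and in the hunk above) protects downstream consumers; aborting the transfer itself would require client.stream().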