crawlo-1.1.4-py3-none-any.whl → crawlo-1.1.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of crawlo has been flagged as a potentially problematic release.
Files changed (186)
  1. crawlo/__init__.py +61 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +61 -0
  4. crawlo/cleaners/data_formatter.py +226 -0
  5. crawlo/cleaners/encoding_converter.py +126 -0
  6. crawlo/cleaners/text_cleaner.py +233 -0
  7. crawlo/cli.py +40 -40
  8. crawlo/commands/__init__.py +13 -13
  9. crawlo/commands/check.py +594 -594
  10. crawlo/commands/genspider.py +151 -151
  11. crawlo/commands/list.py +155 -155
  12. crawlo/commands/run.py +285 -285
  13. crawlo/commands/startproject.py +300 -196
  14. crawlo/commands/stats.py +188 -188
  15. crawlo/commands/utils.py +186 -186
  16. crawlo/config.py +309 -279
  17. crawlo/config_validator.py +253 -0
  18. crawlo/core/__init__.py +2 -2
  19. crawlo/core/engine.py +346 -172
  20. crawlo/core/processor.py +40 -40
  21. crawlo/core/scheduler.py +137 -166
  22. crawlo/crawler.py +1027 -1027
  23. crawlo/downloader/__init__.py +266 -242
  24. crawlo/downloader/aiohttp_downloader.py +220 -212
  25. crawlo/downloader/cffi_downloader.py +256 -251
  26. crawlo/downloader/httpx_downloader.py +259 -259
  27. crawlo/downloader/hybrid_downloader.py +214 -0
  28. crawlo/downloader/playwright_downloader.py +403 -0
  29. crawlo/downloader/selenium_downloader.py +473 -0
  30. crawlo/event.py +11 -11
  31. crawlo/exceptions.py +81 -81
  32. crawlo/extension/__init__.py +37 -37
  33. crawlo/extension/health_check.py +141 -141
  34. crawlo/extension/log_interval.py +57 -57
  35. crawlo/extension/log_stats.py +81 -81
  36. crawlo/extension/logging_extension.py +43 -43
  37. crawlo/extension/memory_monitor.py +104 -88
  38. crawlo/extension/performance_profiler.py +133 -117
  39. crawlo/extension/request_recorder.py +107 -107
  40. crawlo/filters/__init__.py +154 -154
  41. crawlo/filters/aioredis_filter.py +280 -242
  42. crawlo/filters/memory_filter.py +269 -269
  43. crawlo/items/__init__.py +23 -23
  44. crawlo/items/base.py +21 -21
  45. crawlo/items/fields.py +53 -53
  46. crawlo/items/items.py +104 -104
  47. crawlo/middleware/__init__.py +21 -21
  48. crawlo/middleware/default_header.py +32 -32
  49. crawlo/middleware/download_delay.py +28 -28
  50. crawlo/middleware/middleware_manager.py +135 -135
  51. crawlo/middleware/proxy.py +272 -248
  52. crawlo/middleware/request_ignore.py +30 -30
  53. crawlo/middleware/response_code.py +18 -18
  54. crawlo/middleware/response_filter.py +26 -26
  55. crawlo/middleware/retry.py +124 -124
  56. crawlo/mode_manager.py +206 -201
  57. crawlo/network/__init__.py +21 -21
  58. crawlo/network/request.py +338 -311
  59. crawlo/network/response.py +360 -271
  60. crawlo/pipelines/__init__.py +21 -21
  61. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  62. crawlo/pipelines/console_pipeline.py +39 -39
  63. crawlo/pipelines/csv_pipeline.py +316 -316
  64. crawlo/pipelines/database_dedup_pipeline.py +224 -224
  65. crawlo/pipelines/json_pipeline.py +218 -218
  66. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  67. crawlo/pipelines/mongo_pipeline.py +131 -131
  68. crawlo/pipelines/mysql_pipeline.py +316 -316
  69. crawlo/pipelines/pipeline_manager.py +56 -56
  70. crawlo/pipelines/redis_dedup_pipeline.py +166 -162
  71. crawlo/project.py +153 -153
  72. crawlo/queue/pqueue.py +37 -37
  73. crawlo/queue/queue_manager.py +320 -307
  74. crawlo/queue/redis_priority_queue.py +277 -209
  75. crawlo/settings/__init__.py +7 -7
  76. crawlo/settings/default_settings.py +216 -278
  77. crawlo/settings/setting_manager.py +99 -99
  78. crawlo/spider/__init__.py +639 -639
  79. crawlo/stats_collector.py +59 -59
  80. crawlo/subscriber.py +130 -130
  81. crawlo/task_manager.py +30 -30
  82. crawlo/templates/crawlo.cfg.tmpl +10 -10
  83. crawlo/templates/project/__init__.py.tmpl +3 -3
  84. crawlo/templates/project/items.py.tmpl +17 -17
  85. crawlo/templates/project/middlewares.py.tmpl +110 -110
  86. crawlo/templates/project/pipelines.py.tmpl +97 -97
  87. crawlo/templates/project/run.py.tmpl +251 -251
  88. crawlo/templates/project/settings.py.tmpl +326 -279
  89. crawlo/templates/project/settings_distributed.py.tmpl +120 -0
  90. crawlo/templates/project/settings_gentle.py.tmpl +95 -0
  91. crawlo/templates/project/settings_high_performance.py.tmpl +152 -0
  92. crawlo/templates/project/settings_simple.py.tmpl +69 -0
  93. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  94. crawlo/templates/spider/spider.py.tmpl +141 -141
  95. crawlo/tools/__init__.py +183 -0
  96. crawlo/tools/anti_crawler.py +269 -0
  97. crawlo/tools/authenticated_proxy.py +241 -0
  98. crawlo/tools/data_validator.py +181 -0
  99. crawlo/tools/date_tools.py +36 -0
  100. crawlo/tools/distributed_coordinator.py +387 -0
  101. crawlo/tools/retry_mechanism.py +221 -0
  102. crawlo/tools/scenario_adapter.py +263 -0
  103. crawlo/utils/__init__.py +35 -7
  104. crawlo/utils/batch_processor.py +261 -0
  105. crawlo/utils/controlled_spider_mixin.py +439 -439
  106. crawlo/utils/date_tools.py +290 -233
  107. crawlo/utils/db_helper.py +343 -343
  108. crawlo/utils/enhanced_error_handler.py +360 -0
  109. crawlo/utils/env_config.py +106 -0
  110. crawlo/utils/error_handler.py +126 -0
  111. crawlo/utils/func_tools.py +82 -82
  112. crawlo/utils/large_scale_config.py +286 -286
  113. crawlo/utils/large_scale_helper.py +343 -343
  114. crawlo/utils/log.py +128 -128
  115. crawlo/utils/performance_monitor.py +285 -0
  116. crawlo/utils/queue_helper.py +175 -175
  117. crawlo/utils/redis_connection_pool.py +335 -0
  118. crawlo/utils/redis_key_validator.py +200 -0
  119. crawlo/utils/request.py +267 -267
  120. crawlo/utils/request_serializer.py +219 -219
  121. crawlo/utils/spider_loader.py +62 -62
  122. crawlo/utils/system.py +11 -11
  123. crawlo/utils/tools.py +4 -4
  124. crawlo/utils/url.py +39 -39
  125. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/METADATA +401 -403
  126. crawlo-1.1.5.dist-info/RECORD +185 -0
  127. examples/__init__.py +7 -7
  128. tests/__init__.py +7 -7
  129. tests/advanced_tools_example.py +276 -0
  130. tests/authenticated_proxy_example.py +237 -0
  131. tests/cleaners_example.py +161 -0
  132. tests/config_validation_demo.py +103 -0
  133. {examples → tests}/controlled_spider_example.py +205 -205
  134. tests/date_tools_example.py +181 -0
  135. tests/dynamic_loading_example.py +524 -0
  136. tests/dynamic_loading_test.py +105 -0
  137. tests/env_config_example.py +134 -0
  138. tests/error_handling_example.py +172 -0
  139. tests/redis_key_validation_demo.py +131 -0
  140. tests/response_improvements_example.py +145 -0
  141. tests/test_advanced_tools.py +149 -0
  142. tests/test_all_redis_key_configs.py +146 -0
  143. tests/test_authenticated_proxy.py +142 -0
  144. tests/test_cleaners.py +55 -0
  145. tests/test_comprehensive.py +147 -0
  146. tests/test_config_validator.py +194 -0
  147. tests/test_date_tools.py +124 -0
  148. tests/test_dynamic_downloaders_proxy.py +125 -0
  149. tests/test_dynamic_proxy.py +93 -0
  150. tests/test_dynamic_proxy_config.py +147 -0
  151. tests/test_dynamic_proxy_real.py +110 -0
  152. tests/test_edge_cases.py +304 -0
  153. tests/test_enhanced_error_handler.py +271 -0
  154. tests/test_env_config.py +122 -0
  155. tests/test_error_handler_compatibility.py +113 -0
  156. tests/test_final_validation.py +153 -153
  157. tests/test_framework_env_usage.py +104 -0
  158. tests/test_integration.py +357 -0
  159. tests/test_item_dedup_redis_key.py +123 -0
  160. tests/test_parsel.py +30 -0
  161. tests/test_performance.py +328 -0
  162. tests/test_proxy_health_check.py +32 -32
  163. tests/test_proxy_middleware_integration.py +136 -136
  164. tests/test_proxy_providers.py +56 -56
  165. tests/test_proxy_stats.py +19 -19
  166. tests/test_proxy_strategies.py +59 -59
  167. tests/test_queue_manager_redis_key.py +177 -0
  168. tests/test_redis_config.py +28 -28
  169. tests/test_redis_connection_pool.py +295 -0
  170. tests/test_redis_key_naming.py +182 -0
  171. tests/test_redis_key_validator.py +124 -0
  172. tests/test_redis_queue.py +224 -224
  173. tests/test_request_serialization.py +70 -70
  174. tests/test_response_improvements.py +153 -0
  175. tests/test_scheduler.py +241 -241
  176. tests/test_simple_response.py +62 -0
  177. tests/test_telecom_spider_redis_key.py +206 -0
  178. tests/test_template_content.py +88 -0
  179. tests/test_template_redis_key.py +135 -0
  180. tests/test_tools.py +154 -0
  181. tests/tools_example.py +258 -0
  182. crawlo/core/enhanced_engine.py +0 -190
  183. crawlo-1.1.4.dist-info/RECORD +0 -117
  184. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/WHEEL +0 -0
  185. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/entry_points.txt +0 -0
  186. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/top_level.txt +0 -0
@@ -1,213 +1,221 @@
- #!/usr/bin/python
- # -*- coding: UTF-8 -*-
- from yarl import URL
- from typing import Optional
- from aiohttp import (
-     ClientSession,
-     TCPConnector,
-     ClientTimeout,
-     TraceConfig,
-     ClientResponse,
-     ClientError,
-     BasicAuth,
- )
-
- from crawlo.network.response import Response
- from crawlo.utils.log import get_logger
- from crawlo.downloader import DownloaderBase
-
-
- class AioHttpDownloader(DownloaderBase):
-     """
-     High-performance asynchronous downloader
-     - Built on a persistent ClientSession
-     - Recognizes high-level Request semantics (json_body/form_data)
-     - Supports GET/POST/PUT/DELETE and other methods
-     - Supports IP proxies set by middleware (HTTP/HTTPS)
-     - Memory-safety protection
-     """
-
-     def __init__(self, crawler):
-         super().__init__(crawler)
-         self.session: Optional[ClientSession] = None
-         self.max_download_size: int = 0
-         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
-
-     def open(self):
-         super().open()
-         self.logger.info("Opening AioHttpDownloader")
-
-         # Read configuration
-         timeout_secs = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
-         verify_ssl = self.crawler.settings.get_bool("VERIFY_SSL", True)
-         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
-         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
-         self.max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
-
-         # Create the connector
-         connector = TCPConnector(
-             verify_ssl=verify_ssl,
-             limit=pool_limit,
-             limit_per_host=pool_per_host,
-             ttl_dns_cache=300,
-             keepalive_timeout=15,
-             force_close=False,
-         )
-
-         # Timeout control
-         timeout = ClientTimeout(total=timeout_secs)
-
-         # Request tracing
-         trace_config = TraceConfig()
-         trace_config.on_request_start.append(self._on_request_start)
-         trace_config.on_request_end.append(self._on_request_end)
-         trace_config.on_request_exception.append(self._on_request_exception)
-
-         # Create the global session
-         self.session = ClientSession(
-             connector=connector,
-             timeout=timeout,
-             trace_configs=[trace_config],
-             auto_decompress=True,
-         )
-
-         self.logger.debug("AioHttpDownloader initialized.")
-
-     async def download(self, request) -> Optional[Response]:
-         """Download the request and return a response."""
-         if not self.session or self.session.closed:
-             raise RuntimeError("AioHttpDownloader session is not open.")
-
-         start_time = None
-         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
-             import time
-             start_time = time.time()
-
-         try:
-             # Use the generic send logic (supports all HTTP methods)
-             async with await self._send_request(self.session, request) as resp:
-                 # Safety check: prevent large response bodies from causing OOM
-                 content_length = resp.headers.get("Content-Length")
-                 if content_length and int(content_length) > self.max_download_size:
-                     raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
-
-                 body = await resp.read()
-                 response = self._structure_response(request, resp, body)
-
-                 # Record download statistics
-                 if start_time:
-                     download_time = time.time() - start_time
-                     self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
-
-                 return response
-
-         except ClientError as e:
-             self.logger.error(f"Client error for {request.url}: {e}")
-             raise
-         except Exception as e:
-             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
-             raise
-
-     @staticmethod
-     async def _send_request(session: ClientSession, request) -> ClientResponse:
-         """
-         Send the request according to its HTTP method and high-level semantics.
-         Supports a proxy set by middleware, in either of these formats:
-         - str: "http://user:pass@host:port"
-         - dict: {"http": "...", "https": "..."} (the https or http field is picked automatically)
-         """
-         method = request.method.lower()
-         if not hasattr(session, method):
-             raise ValueError(f"Unsupported HTTP method: {request.method}")
-
-         method_func = getattr(session, method)
-
-         # Build request kwargs
-         kwargs = {
-             "headers": request.headers,
-             "cookies": request.cookies,
-             "allow_redirects": request.allow_redirects,
-         }
-
-         # === Proxy handling ===
-         proxy = getattr(request, "proxy", None)
-         proxy_auth = None
-
-         if proxy:
-             # Accept dict format: {"http": "http://...", "https": "http://..."}
-             if isinstance(proxy, dict):
-                 # Prefer https, fall back to http
-                 proxy = proxy.get("https") or proxy.get("http")
-
-             if not isinstance(proxy, (str, URL)):
-                 raise ValueError(f"proxy must be str or URL, got {type(proxy)}")
-
-             try:
-                 proxy_url = URL(proxy)
-                 if proxy_url.scheme not in ("http", "https"):
-                     raise ValueError(f"Unsupported proxy scheme: {proxy_url.scheme}, only HTTP/HTTPS supported.")
-
-                 # Extract authentication credentials
-                 if proxy_url.user and proxy_url.password:
-                     proxy_auth = BasicAuth(proxy_url.user, proxy_url.password)
-                     # URL with the username/password removed
-                     proxy = str(proxy_url.with_user(None))
-                 else:
-                     proxy = str(proxy_url)
-
-                 kwargs["proxy"] = proxy
-                 if proxy_auth:
-                     kwargs["proxy_auth"] = proxy_auth
-
-             except Exception as e:
-                 raise ValueError(f"Invalid proxy URL: {proxy}") from e
-
-         # === Request body handling ===
-         if hasattr(request, "_json_body") and request._json_body is not None:
-             kwargs["json"] = request._json_body
-         elif isinstance(request.body, (dict, list)):
-             kwargs["json"] = request.body
-         else:
-             if request.body is not None:
-                 kwargs["data"] = request.body
-
-         return await method_func(request.url, **kwargs)
-
-     @staticmethod
-     def _structure_response(request, resp: ClientResponse, body: bytes) -> Response:
-         """Build the Response object required by the framework."""
-         return Response(
-             url=str(resp.url),
-             headers=dict(resp.headers),
-             status_code=resp.status,
-             body=body,
-             request=request,
-         )
-
-     # --- Request tracing logs ---
-     async def _on_request_start(self, session, trace_config_ctx, params):
-         """Callback invoked when a request starts."""
-         proxy = getattr(params, "proxy", None)
-         proxy_info = f" via {proxy}" if proxy else ""
-         self.logger.debug(f"Requesting: {params.method} {params.url}{proxy_info}")
-
-     async def _on_request_end(self, session, trace_config_ctx, params):
-         """Callback invoked when a request finishes successfully."""
-         response = params.response
-         self.logger.debug(
-             f"Finished: {params.method} {params.url} with status {response.status}"
-         )
-
-     async def _on_request_exception(self, session, trace_config_ctx, params):
-         """Callback invoked when a request raises an exception."""
-         exc = params.exception
-         self.logger.warning(
-             f"Failed: {params.method} {params.url} with exception {type(exc).__name__}: {exc}"
-         )
-
-     async def close(self) -> None:
-         """Close session resources."""
-         if self.session and not self.session.closed:
-             self.logger.info("Closing AioHttpDownloader session...")
-             await self.session.close()
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ from yarl import URL
+ from typing import Optional
+ from aiohttp import (
+     ClientSession,
+     TCPConnector,
+     ClientTimeout,
+     TraceConfig,
+     ClientResponse,
+     ClientError,
+     BasicAuth,
+ )
+
+ from crawlo.network.response import Response
+ from crawlo.utils.log import get_logger
+ from crawlo.downloader import DownloaderBase
+
+
+ class AioHttpDownloader(DownloaderBase):
+     """
+     High-performance asynchronous downloader
+     - Built on a persistent ClientSession
+     - Recognizes high-level Request semantics (json_body/form_data)
+     - Supports GET/POST/PUT/DELETE and other methods
+     - Supports IP proxies set by middleware (HTTP/HTTPS)
+     - Memory-safety protection
+     """
+
+     def __init__(self, crawler):
+         super().__init__(crawler)
+         self.session: Optional[ClientSession] = None
+         self.max_download_size: int = 0
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
+
+     def open(self):
+         super().open()
+         self.logger.info("Opening AioHttpDownloader")
+
+         # Read configuration
+         timeout_secs = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
+         verify_ssl = self.crawler.settings.get_bool("VERIFY_SSL", True)
+         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
+         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
+         self.max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
+
+         # Create the connector
+         connector = TCPConnector(
+             verify_ssl=verify_ssl,
+             limit=pool_limit,
+             limit_per_host=pool_per_host,
+             ttl_dns_cache=300,
+             keepalive_timeout=15,
+             force_close=False,
+         )
+
+         # Timeout control
+         timeout = ClientTimeout(total=timeout_secs)
+
+         # Request tracing
+         trace_config = TraceConfig()
+         trace_config.on_request_start.append(self._on_request_start)
+         trace_config.on_request_end.append(self._on_request_end)
+         trace_config.on_request_exception.append(self._on_request_exception)
+
+         # Create the global session
+         self.session = ClientSession(
+             connector=connector,
+             timeout=timeout,
+             trace_configs=[trace_config],
+             auto_decompress=True,
+         )
+
+         self.logger.debug("AioHttpDownloader initialized.")
+
+     async def download(self, request) -> Optional[Response]:
+         """Download the request and return a response."""
+         if not self.session or self.session.closed:
+             raise RuntimeError("AioHttpDownloader session is not open.")
+
+         start_time = None
+         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
+             import time
+             start_time = time.time()
+
+         try:
+             # Use the generic send logic (supports all HTTP methods)
+             async with await self._send_request(self.session, request) as resp:
+                 # Safety check: prevent large response bodies from causing OOM
+                 content_length = resp.headers.get("Content-Length")
+                 if content_length and int(content_length) > self.max_download_size:
+                     raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
+
+                 body = await resp.read()
+                 response = self._structure_response(request, resp, body)
+
+                 # Record download statistics
+                 if start_time:
+                     download_time = time.time() - start_time
+                     self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
+
+                 return response
+
+         except ClientError as e:
+             self.logger.error(f"Client error for {request.url}: {e}")
+             raise
+         except Exception as e:
+             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
+             raise
+
+     @staticmethod
+     async def _send_request(session: ClientSession, request) -> ClientResponse:
+         """
+         Send the request according to its HTTP method and high-level semantics.
+         Supports a proxy set by middleware, in either of these formats:
+         - str: "http://user:pass@host:port"
+         - dict: {"http": "...", "https": "..."} (the https or http field is picked automatically)
+         """
+         method = request.method.lower()
+         if not hasattr(session, method):
+             raise ValueError(f"Unsupported HTTP method: {request.method}")
+
+         method_func = getattr(session, method)
+
+         # Build request kwargs
+         kwargs = {
+             "headers": request.headers,
+             "cookies": request.cookies,
+             "allow_redirects": request.allow_redirects,
+         }
+
+         # === Proxy handling ===
+         proxy = getattr(request, "proxy", None)
+         proxy_auth = None
+
+         if proxy:
+             # Accept dict format: {"http": "http://...", "https": "http://..."}
+             if isinstance(proxy, dict):
+                 # Prefer https, fall back to http
+                 proxy = proxy.get("https") or proxy.get("http")
+
+             if not isinstance(proxy, (str, URL)):
+                 raise ValueError(f"proxy must be str or URL, got {type(proxy)}")
+
+             try:
+                 proxy_url = URL(proxy)
+                 if proxy_url.scheme not in ("http", "https"):
+                     raise ValueError(f"Unsupported proxy scheme: {proxy_url.scheme}, only HTTP/HTTPS supported.")
+
+                 # Extract authentication credentials
+                 if proxy_url.user and proxy_url.password:
+                     proxy_auth = BasicAuth(proxy_url.user, proxy_url.password)
+                     # URL with the username/password removed
+                     proxy = str(proxy_url.with_user(None))
+                 else:
+                     proxy = str(proxy_url)
+
+                 kwargs["proxy"] = proxy
+                 if proxy_auth:
+                     kwargs["proxy_auth"] = proxy_auth
+
+             except Exception as e:
+                 raise ValueError(f"Invalid proxy URL: {proxy}") from e
+
+         # Handle proxy authentication credentials passed via meta
+         meta_proxy_auth = request.meta.get("proxy_auth")
+         if meta_proxy_auth and isinstance(meta_proxy_auth, dict):
+             username = meta_proxy_auth.get("username")
+             password = meta_proxy_auth.get("password")
+             if username and password:
+                 kwargs["proxy_auth"] = BasicAuth(username, password)
+
+         # === Request body handling ===
+         if hasattr(request, "_json_body") and request._json_body is not None:
+             kwargs["json"] = request._json_body
+         elif isinstance(request.body, (dict, list)):
+             kwargs["json"] = request.body
+         else:
+             if request.body is not None:
+                 kwargs["data"] = request.body
+
+         return await method_func(request.url, **kwargs)
+
+     @staticmethod
+     def _structure_response(request, resp: ClientResponse, body: bytes) -> Response:
+         """Build the Response object required by the framework."""
+         return Response(
+             url=str(resp.url),
+             headers=dict(resp.headers),
+             status_code=resp.status,
+             body=body,
+             request=request,
+         )
+
+     # --- Request tracing logs ---
+     async def _on_request_start(self, session, trace_config_ctx, params):
+         """Callback invoked when a request starts."""
+         proxy = getattr(params, "proxy", None)
+         proxy_info = f" via {proxy}" if proxy else ""
+         self.logger.debug(f"Requesting: {params.method} {params.url}{proxy_info}")
+
+     async def _on_request_end(self, session, trace_config_ctx, params):
+         """Callback invoked when a request finishes successfully."""
+         response = params.response
+         self.logger.debug(
+             f"Finished: {params.method} {params.url} with status {response.status}"
+         )
+
+     async def _on_request_exception(self, session, trace_config_ctx, params):
+         """Callback invoked when a request raises an exception."""
+         exc = params.exception
+         self.logger.warning(
+             f"Failed: {params.method} {params.url} with exception {type(exc).__name__}: {exc}"
+         )
+
+     async def close(self) -> None:
+         """Close session resources."""
+         if self.session and not self.session.closed:
+             self.logger.info("Closing AioHttpDownloader session...")
+             await self.session.close()
  self.logger.debug("AioHttpDownloader closed.")
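
The visible functional change in this hunk appears to be the new meta-based credential path at the end of _send_request's proxy handling: middleware can now attach credentials as request.meta["proxy_auth"] = {"username": ..., "password": ...} rather than embedding them in the proxy URL, and since that assignment runs after the URL-derived auth it takes precedence. For reference, below is a minimal standalone sketch of the same proxy normalization the downloader performs, using only the yarl and aiohttp calls visible in the diff; the helper name normalize_proxy and the sample values are hypothetical and not part of crawlo.

from aiohttp import BasicAuth
from yarl import URL


def normalize_proxy(proxy):
    # Hypothetical helper mirroring _send_request's proxy handling:
    # accepts "http://user:pass@host:port" or {"http": ..., "https": ...}
    # and returns (proxy_url, proxy_auth) in the form aiohttp expects.
    if isinstance(proxy, dict):
        # Prefer the https entry, fall back to http (as in the diff)
        proxy = proxy.get("https") or proxy.get("http")
    proxy_url = URL(proxy)
    if proxy_url.scheme not in ("http", "https"):
        raise ValueError(f"Unsupported proxy scheme: {proxy_url.scheme}")
    proxy_auth = None
    if proxy_url.user and proxy_url.password:
        # Credentials embedded in the URL become an explicit BasicAuth,
        # and the URL itself is re-emitted without user/password.
        proxy_auth = BasicAuth(proxy_url.user, proxy_url.password)
        proxy_url = proxy_url.with_user(None)
    return str(proxy_url), proxy_auth


# Hypothetical sample values:
print(normalize_proxy("http://alice:secret@127.0.0.1:8080"))
# -> ('http://127.0.0.1:8080', BasicAuth(login='alice', password='secret', encoding='latin1'))
print(normalize_proxy({"http": "http://10.0.0.1:3128"}))
# -> ('http://10.0.0.1:3128', None)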