crawlo 1.1.0-py3-none-any.whl → 1.1.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (111)
  1. crawlo/__init__.py +33 -24
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -155
  6. crawlo/commands/genspider.py +125 -110
  7. crawlo/commands/list.py +147 -119
  8. crawlo/commands/run.py +285 -170
  9. crawlo/commands/startproject.py +111 -101
  10. crawlo/commands/stats.py +188 -167
  11. crawlo/core/__init__.py +2 -2
  12. crawlo/core/engine.py +158 -158
  13. crawlo/core/processor.py +40 -40
  14. crawlo/core/scheduler.py +57 -57
  15. crawlo/crawler.py +494 -492
  16. crawlo/downloader/__init__.py +78 -78
  17. crawlo/downloader/aiohttp_downloader.py +199 -199
  18. crawlo/downloader/cffi_downloader.py +242 -277
  19. crawlo/downloader/httpx_downloader.py +246 -246
  20. crawlo/event.py +11 -11
  21. crawlo/exceptions.py +78 -78
  22. crawlo/extension/__init__.py +31 -31
  23. crawlo/extension/log_interval.py +49 -49
  24. crawlo/extension/log_stats.py +44 -44
  25. crawlo/extension/logging_extension.py +34 -34
  26. crawlo/filters/__init__.py +37 -37
  27. crawlo/filters/aioredis_filter.py +150 -150
  28. crawlo/filters/memory_filter.py +202 -202
  29. crawlo/items/__init__.py +23 -23
  30. crawlo/items/base.py +21 -21
  31. crawlo/items/fields.py +53 -53
  32. crawlo/items/items.py +104 -104
  33. crawlo/middleware/__init__.py +21 -21
  34. crawlo/middleware/default_header.py +32 -32
  35. crawlo/middleware/download_delay.py +28 -28
  36. crawlo/middleware/middleware_manager.py +135 -135
  37. crawlo/middleware/proxy.py +245 -245
  38. crawlo/middleware/request_ignore.py +30 -30
  39. crawlo/middleware/response_code.py +18 -18
  40. crawlo/middleware/response_filter.py +26 -26
  41. crawlo/middleware/retry.py +90 -90
  42. crawlo/network/__init__.py +7 -7
  43. crawlo/network/request.py +203 -203
  44. crawlo/network/response.py +166 -166
  45. crawlo/pipelines/__init__.py +13 -13
  46. crawlo/pipelines/console_pipeline.py +39 -39
  47. crawlo/pipelines/mongo_pipeline.py +116 -116
  48. crawlo/pipelines/mysql_batch_pipline.py +272 -272
  49. crawlo/pipelines/mysql_pipeline.py +195 -195
  50. crawlo/pipelines/pipeline_manager.py +56 -56
  51. crawlo/project.py +153 -0
  52. crawlo/settings/__init__.py +7 -7
  53. crawlo/settings/default_settings.py +166 -168
  54. crawlo/settings/setting_manager.py +99 -99
  55. crawlo/spider/__init__.py +129 -129
  56. crawlo/stats_collector.py +59 -59
  57. crawlo/subscriber.py +106 -106
  58. crawlo/task_manager.py +27 -27
  59. crawlo/templates/crawlo.cfg.tmpl +10 -10
  60. crawlo/templates/project/__init__.py.tmpl +3 -3
  61. crawlo/templates/project/items.py.tmpl +17 -17
  62. crawlo/templates/project/middlewares.py.tmpl +75 -75
  63. crawlo/templates/project/pipelines.py.tmpl +63 -63
  64. crawlo/templates/project/settings.py.tmpl +54 -54
  65. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  66. crawlo/templates/spider/spider.py.tmpl +31 -31
  67. crawlo/utils/__init__.py +7 -7
  68. crawlo/utils/date_tools.py +233 -233
  69. crawlo/utils/db_helper.py +343 -343
  70. crawlo/utils/func_tools.py +82 -82
  71. crawlo/utils/log.py +128 -128
  72. crawlo/utils/pqueue.py +173 -173
  73. crawlo/utils/request.py +267 -267
  74. crawlo/utils/spider_loader.py +62 -62
  75. crawlo/utils/system.py +11 -11
  76. crawlo/utils/tools.py +4 -4
  77. crawlo/utils/url.py +39 -39
  78. crawlo-1.1.1.dist-info/METADATA +220 -0
  79. crawlo-1.1.1.dist-info/RECORD +100 -0
  80. examples/__init__.py +7 -0
  81. examples/baidu_spider/__init__.py +7 -0
  82. examples/baidu_spider/demo.py +94 -0
  83. examples/baidu_spider/items.py +46 -0
  84. examples/baidu_spider/middleware.py +49 -0
  85. examples/baidu_spider/pipeline.py +55 -0
  86. examples/baidu_spider/run.py +27 -0
  87. examples/baidu_spider/settings.py +121 -0
  88. examples/baidu_spider/spiders/__init__.py +7 -0
  89. examples/baidu_spider/spiders/bai_du.py +61 -0
  90. examples/baidu_spider/spiders/miit.py +159 -0
  91. examples/baidu_spider/spiders/sina.py +79 -0
  92. tests/__init__.py +7 -7
  93. tests/test_proxy_health_check.py +32 -32
  94. tests/test_proxy_middleware_integration.py +136 -136
  95. tests/test_proxy_providers.py +56 -56
  96. tests/test_proxy_stats.py +19 -19
  97. tests/test_proxy_strategies.py +59 -59
  98. crawlo/utils/concurrency_manager.py +0 -125
  99. crawlo/utils/project.py +0 -197
  100. crawlo-1.1.0.dist-info/METADATA +0 -49
  101. crawlo-1.1.0.dist-info/RECORD +0 -97
  102. examples/gxb/__init__.py +0 -0
  103. examples/gxb/items.py +0 -36
  104. examples/gxb/run.py +0 -16
  105. examples/gxb/settings.py +0 -72
  106. examples/gxb/spider/__init__.py +0 -2
  107. examples/gxb/spider/miit_spider.py +0 -180
  108. examples/gxb/spider/telecom_device.py +0 -129
  109. {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/WHEEL +0 -0
  110. {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/entry_points.txt +0 -0
  111. {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/top_level.txt +0 -0
crawlo/downloader/__init__.py
@@ -1,78 +1,78 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from abc import abstractmethod, ABCMeta
- from typing_extensions import Self
- from typing import Final, Set, Optional
- from contextlib import asynccontextmanager
-
- from crawlo import Response, Request
- from crawlo.utils.log import get_logger
- from crawlo.middleware.middleware_manager import MiddlewareManager
-
-
- class ActivateRequestManager:
-
-     def __init__(self):
-         self._active: Final[Set] = set()
-
-     def add(self, request):
-         self._active.add(request)
-
-     def remove(self, request):
-         self._active.remove(request)
-
-     @asynccontextmanager
-     async def __call__(self, request):
-         try:
-             yield self.add(request)
-         finally:
-             self.remove(request)
-
-     def __len__(self):
-         return len(self._active)
-
-
- class DownloaderMeta(ABCMeta):
-     def __subclasscheck__(self, subclass):
-         required_methods = ('fetch', 'download', 'create_instance', 'close')
-         is_subclass = all(
-             hasattr(subclass, method) and callable(getattr(subclass, method, None)) for method in required_methods
-         )
-         return is_subclass
-
-
- class DownloaderBase(metaclass=DownloaderMeta):
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self._active = ActivateRequestManager()
-         self.middleware: Optional[MiddlewareManager] = None
-         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
-
-     @classmethod
-     def create_instance(cls, *args, **kwargs) -> Self:
-         return cls(*args, **kwargs)
-
-     def open(self) -> None:
-         self.logger.info(
-             f"{self.crawler.spider} <downloader class:{type(self).__name__}>"
-             f"<concurrency:{self.crawler.settings.get_int('CONCURRENCY')}>"
-         )
-         self.middleware = MiddlewareManager.create_instance(self.crawler)
-
-     async def fetch(self, request) -> Optional[Response]:
-         async with self._active(request):
-             response = await self.middleware.download(request)
-             return response
-
-     @abstractmethod
-     async def download(self, request: Request) -> Response:
-         pass
-
-     async def close(self) -> None:
-         pass
-
-     def idle(self) -> bool:
-         return len(self) == 0
-
-     def __len__(self) -> int:
-         return len(self._active)
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from abc import abstractmethod, ABCMeta
+ from typing_extensions import Self
+ from typing import Final, Set, Optional
+ from contextlib import asynccontextmanager
+
+ from crawlo import Response, Request
+ from crawlo.utils.log import get_logger
+ from crawlo.middleware.middleware_manager import MiddlewareManager
+
+
+ class ActivateRequestManager:
+
+     def __init__(self):
+         self._active: Final[Set] = set()
+
+     def add(self, request):
+         self._active.add(request)
+
+     def remove(self, request):
+         self._active.remove(request)
+
+     @asynccontextmanager
+     async def __call__(self, request):
+         try:
+             yield self.add(request)
+         finally:
+             self.remove(request)
+
+     def __len__(self):
+         return len(self._active)
+
+
+ class DownloaderMeta(ABCMeta):
+     def __subclasscheck__(self, subclass):
+         required_methods = ('fetch', 'download', 'create_instance', 'close')
+         is_subclass = all(
+             hasattr(subclass, method) and callable(getattr(subclass, method, None)) for method in required_methods
+         )
+         return is_subclass
+
+
+ class DownloaderBase(metaclass=DownloaderMeta):
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self._active = ActivateRequestManager()
+         self.middleware: Optional[MiddlewareManager] = None
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
+
+     @classmethod
+     def create_instance(cls, *args, **kwargs) -> Self:
+         return cls(*args, **kwargs)
+
+     def open(self) -> None:
+         self.logger.info(
+             f"{self.crawler.spider} <downloader class:{type(self).__name__}>"
+             f"<concurrency:{self.crawler.settings.get_int('CONCURRENCY')}>"
+         )
+         self.middleware = MiddlewareManager.create_instance(self.crawler)
+
+     async def fetch(self, request) -> Optional[Response]:
+         async with self._active(request):
+             response = await self.middleware.download(request)
+             return response
+
+     @abstractmethod
+     async def download(self, request: Request) -> Response:
+         pass
+
+     async def close(self) -> None:
+         pass
+
+     def idle(self) -> bool:
+         return len(self) == 0
+
+     def __len__(self) -> int:
+         return len(self._active)
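The base module above is what concrete downloaders plug into: DownloaderMeta accepts any class that exposes callable fetch/download/create_instance/close attributes, and subclasses of DownloaderBase only have to implement download(). As a rough, hypothetical sketch (not shipped with crawlo), a minimal downloader could look like the following; it assumes the same Response constructor fields used by AioHttpDownloader further down in this diff (url, headers, status_code, body, request).

# Hypothetical sketch, not part of the package: the smallest downloader that
# satisfies DownloaderBase. It opens one aiohttp session per request, trading
# the pooling and tracing features of AioHttpDownloader for brevity.
from typing import Optional

import aiohttp

from crawlo import Request, Response
from crawlo.downloader import DownloaderBase


class MinimalDownloader(DownloaderBase):
    # __init__(crawler), fetch(), create_instance() and close() are inherited.
    async def download(self, request: Request) -> Optional[Response]:
        async with aiohttp.ClientSession() as session:
            async with session.request(request.method, request.url, headers=request.headers) as resp:
                body = await resp.read()
                # Same Response fields as AioHttpDownloader._structure_response below.
                return Response(
                    url=str(resp.url),
                    headers=dict(resp.headers),
                    status_code=resp.status,
                    body=body,
                    request=request,
                )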
crawlo/downloader/aiohttp_downloader.py
@@ -1,200 +1,200 @@
- #!/usr/bin/python
- # -*- coding: UTF-8 -*-
- from yarl import URL
- from typing import Optional
- from aiohttp import (
-     ClientSession,
-     TCPConnector,
-     ClientTimeout,
-     TraceConfig,
-     ClientResponse,
-     ClientError,
-     BasicAuth,
- )
-
- from crawlo import Response
- from crawlo.utils.log import get_logger
- from crawlo.downloader import DownloaderBase
-
-
- class AioHttpDownloader(DownloaderBase):
-     """
-     High-performance asynchronous downloader
-     - Built on a persistent ClientSession
-     - Understands the Request's high-level semantics (json_body/form_data)
-     - Supports GET/POST/PUT/DELETE and other methods
-     - Supports IP proxies set by middleware (HTTP/HTTPS)
-     - Memory-safety guard
-     """
-
-     def __init__(self, crawler):
-         super().__init__(crawler)
-         self.session: Optional[ClientSession] = None
-         self.max_download_size: int = 0
-         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
-
-     def open(self):
-         super().open()
-         self.logger.info("Opening AioHttpDownloader")
-
-         # Read configuration
-         timeout_secs = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
-         verify_ssl = self.crawler.settings.get_bool("VERIFY_SSL", True)
-         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
-         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
-         self.max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
-
-         # Create the connector
-         connector = TCPConnector(
-             verify_ssl=verify_ssl,
-             limit=pool_limit,
-             limit_per_host=pool_per_host,
-             ttl_dns_cache=300,
-             keepalive_timeout=15,
-             force_close=False,
-         )
-
-         # Timeout control
-         timeout = ClientTimeout(total=timeout_secs)
-
-         # Request tracing
-         trace_config = TraceConfig()
-         trace_config.on_request_start.append(self._on_request_start)
-         trace_config.on_request_end.append(self._on_request_end)
-         trace_config.on_request_exception.append(self._on_request_exception)
-
-         # Create the global session
-         self.session = ClientSession(
-             connector=connector,
-             timeout=timeout,
-             trace_configs=[trace_config],
-             auto_decompress=True,
-         )
-
-         self.logger.debug("AioHttpDownloader initialized.")
-
-     async def download(self, request) -> Optional[Response]:
-         if not self.session or self.session.closed:
-             raise RuntimeError("AioHttpDownloader session is not open.")
-
-         try:
-             # Generic send path (supports all HTTP methods)
-             async with await self._send_request(self.session, request) as resp:
-                 # Safety check: guard against large response bodies causing OOM
-                 content_length = resp.headers.get("Content-Length")
-                 if content_length and int(content_length) > self.max_download_size:
-                     raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
-
-                 body = await resp.read()
-                 return self._structure_response(request, resp, body)
-
-         except ClientError as e:
-             self.logger.error(f"Client error for {request.url}: {e}")
-             raise
-         except Exception as e:
-             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
-             raise
-
-     @staticmethod
-     async def _send_request(session: ClientSession, request) -> ClientResponse:
-         """
-         Send the request according to its method and high-level semantics.
-         Supports a proxy set by middleware in either of these formats:
-         - str: "http://user:pass@host:port"
-         - dict: {"http": "...", "https": "..."} (the http or https entry is picked automatically)
-         """
-         method = request.method.lower()
-         if not hasattr(session, method):
-             raise ValueError(f"Unsupported HTTP method: {request.method}")
-
-         method_func = getattr(session, method)
-
-         # Build the request arguments
-         kwargs = {
-             "headers": request.headers,
-             "cookies": request.cookies,
-             "allow_redirects": request.allow_redirects,
-         }
-
-         # === Handle the proxy ===
-         proxy = getattr(request, "proxy", None)
-         proxy_auth = None
-
-         if proxy:
-             # Accept the dict format: {"http": "http://...", "https": "http://..."}
-             if isinstance(proxy, dict):
-                 # Prefer https, fall back to http
-                 proxy = proxy.get("https") or proxy.get("http")
-
-             if not isinstance(proxy, (str, URL)):
-                 raise ValueError(f"proxy must be str or URL, got {type(proxy)}")
-
-             try:
-                 proxy_url = URL(proxy)
-                 if proxy_url.scheme not in ("http", "https"):
-                     raise ValueError(f"Unsupported proxy scheme: {proxy_url.scheme}, only HTTP/HTTPS supported.")
-
-                 # Extract authentication credentials
-                 if proxy_url.user and proxy_url.password:
-                     proxy_auth = BasicAuth(proxy_url.user, proxy_url.password)
-                     # Proxy URL with the credentials stripped
-                     proxy = str(proxy_url.with_user(None))
-                 else:
-                     proxy = str(proxy_url)
-
-                 kwargs["proxy"] = proxy
-                 if proxy_auth:
-                     kwargs["proxy_auth"] = proxy_auth
-
-             except Exception as e:
-                 raise ValueError(f"Invalid proxy URL: {proxy}") from e
-
-         # === Handle the request body ===
-         if hasattr(request, "_json_body") and request._json_body is not None:
-             kwargs["json"] = request._json_body
-         elif isinstance(request.body, (dict, list)):
-             kwargs["json"] = request.body
-         else:
-             if request.body is not None:
-                 kwargs["data"] = request.body
-
-         return await method_func(request.url, **kwargs)
-
-     @staticmethod
-     def _structure_response(request, resp: ClientResponse, body: bytes) -> Response:
-         """Build the Response object the framework expects"""
-         return Response(
-             url=str(resp.url),
-             headers=dict(resp.headers),
-             status_code=resp.status,
-             body=body,
-             request=request,
-         )
-
-     # --- Request tracing logs ---
-     async def _on_request_start(self, session, trace_config_ctx, params):
-         """Callback fired when a request starts."""
-         proxy = getattr(params, "proxy", None)
-         proxy_info = f" via {proxy}" if proxy else ""
-         self.logger.debug(f"Requesting: {params.method} {params.url}{proxy_info}")
-
-     async def _on_request_end(self, session, trace_config_ctx, params):
-         """Callback fired when a request finishes successfully."""
-         response = params.response
-         self.logger.debug(
-             f"Finished: {params.method} {params.url} with status {response.status}"
-         )
-
-     async def _on_request_exception(self, session, trace_config_ctx, params):
-         """Callback fired when a request raises an exception."""
-         exc = params.exception
-         self.logger.warning(
-             f"Failed: {params.method} {params.url} with exception {type(exc).__name__}: {exc}"
-         )
-
-     async def close(self) -> None:
-         """Release the session resources"""
-         if self.session and not self.session.closed:
-             self.logger.info("Closing AioHttpDownloader session...")
-             await self.session.close()
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ from yarl import URL
+ from typing import Optional
+ from aiohttp import (
+     ClientSession,
+     TCPConnector,
+     ClientTimeout,
+     TraceConfig,
+     ClientResponse,
+     ClientError,
+     BasicAuth,
+ )
+
+ from crawlo import Response
+ from crawlo.utils.log import get_logger
+ from crawlo.downloader import DownloaderBase
+
+
+ class AioHttpDownloader(DownloaderBase):
+     """
+     High-performance asynchronous downloader
+     - Built on a persistent ClientSession
+     - Understands the Request's high-level semantics (json_body/form_data)
+     - Supports GET/POST/PUT/DELETE and other methods
+     - Supports IP proxies set by middleware (HTTP/HTTPS)
+     - Memory-safety guard
+     """
+
+     def __init__(self, crawler):
+         super().__init__(crawler)
+         self.session: Optional[ClientSession] = None
+         self.max_download_size: int = 0
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
+
+     def open(self):
+         super().open()
+         self.logger.info("Opening AioHttpDownloader")
+
+         # Read configuration
+         timeout_secs = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
+         verify_ssl = self.crawler.settings.get_bool("VERIFY_SSL", True)
+         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
+         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
+         self.max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
+
+         # Create the connector
+         connector = TCPConnector(
+             verify_ssl=verify_ssl,
+             limit=pool_limit,
+             limit_per_host=pool_per_host,
+             ttl_dns_cache=300,
+             keepalive_timeout=15,
+             force_close=False,
+         )
+
+         # Timeout control
+         timeout = ClientTimeout(total=timeout_secs)
+
+         # Request tracing
+         trace_config = TraceConfig()
+         trace_config.on_request_start.append(self._on_request_start)
+         trace_config.on_request_end.append(self._on_request_end)
+         trace_config.on_request_exception.append(self._on_request_exception)
+
+         # Create the global session
+         self.session = ClientSession(
+             connector=connector,
+             timeout=timeout,
+             trace_configs=[trace_config],
+             auto_decompress=True,
+         )
+
+         self.logger.debug("AioHttpDownloader initialized.")
+
+     async def download(self, request) -> Optional[Response]:
+         if not self.session or self.session.closed:
+             raise RuntimeError("AioHttpDownloader session is not open.")
+
+         try:
+             # Generic send path (supports all HTTP methods)
+             async with await self._send_request(self.session, request) as resp:
+                 # Safety check: guard against large response bodies causing OOM
+                 content_length = resp.headers.get("Content-Length")
+                 if content_length and int(content_length) > self.max_download_size:
+                     raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
+
+                 body = await resp.read()
+                 return self._structure_response(request, resp, body)
+
+         except ClientError as e:
+             self.logger.error(f"Client error for {request.url}: {e}")
+             raise
+         except Exception as e:
+             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
+             raise
+
+     @staticmethod
+     async def _send_request(session: ClientSession, request) -> ClientResponse:
+         """
+         Send the request according to its method and high-level semantics.
+         Supports a proxy set by middleware in either of these formats:
+         - str: "http://user:pass@host:port"
+         - dict: {"http": "...", "https": "..."} (the http or https entry is picked automatically)
+         """
+         method = request.method.lower()
+         if not hasattr(session, method):
+             raise ValueError(f"Unsupported HTTP method: {request.method}")
+
+         method_func = getattr(session, method)
+
+         # Build the request arguments
+         kwargs = {
+             "headers": request.headers,
+             "cookies": request.cookies,
+             "allow_redirects": request.allow_redirects,
+         }
+
+         # === Handle the proxy ===
+         proxy = getattr(request, "proxy", None)
+         proxy_auth = None
+
+         if proxy:
+             # Accept the dict format: {"http": "http://...", "https": "http://..."}
+             if isinstance(proxy, dict):
+                 # Prefer https, fall back to http
+                 proxy = proxy.get("https") or proxy.get("http")
+
+             if not isinstance(proxy, (str, URL)):
+                 raise ValueError(f"proxy must be str or URL, got {type(proxy)}")
+
+             try:
+                 proxy_url = URL(proxy)
+                 if proxy_url.scheme not in ("http", "https"):
+                     raise ValueError(f"Unsupported proxy scheme: {proxy_url.scheme}, only HTTP/HTTPS supported.")
+
+                 # Extract authentication credentials
+                 if proxy_url.user and proxy_url.password:
+                     proxy_auth = BasicAuth(proxy_url.user, proxy_url.password)
+                     # Proxy URL with the credentials stripped
+                     proxy = str(proxy_url.with_user(None))
+                 else:
+                     proxy = str(proxy_url)
+
+                 kwargs["proxy"] = proxy
+                 if proxy_auth:
+                     kwargs["proxy_auth"] = proxy_auth
+
+             except Exception as e:
+                 raise ValueError(f"Invalid proxy URL: {proxy}") from e
+
+         # === Handle the request body ===
+         if hasattr(request, "_json_body") and request._json_body is not None:
+             kwargs["json"] = request._json_body
+         elif isinstance(request.body, (dict, list)):
+             kwargs["json"] = request.body
+         else:
+             if request.body is not None:
+                 kwargs["data"] = request.body
+
+         return await method_func(request.url, **kwargs)
+
+     @staticmethod
+     def _structure_response(request, resp: ClientResponse, body: bytes) -> Response:
+         """Build the Response object the framework expects"""
+         return Response(
+             url=str(resp.url),
+             headers=dict(resp.headers),
+             status_code=resp.status,
+             body=body,
+             request=request,
+         )
+
+     # --- Request tracing logs ---
+     async def _on_request_start(self, session, trace_config_ctx, params):
+         """Callback fired when a request starts."""
+         proxy = getattr(params, "proxy", None)
+         proxy_info = f" via {proxy}" if proxy else ""
+         self.logger.debug(f"Requesting: {params.method} {params.url}{proxy_info}")
+
+     async def _on_request_end(self, session, trace_config_ctx, params):
+         """Callback fired when a request finishes successfully."""
+         response = params.response
+         self.logger.debug(
+             f"Finished: {params.method} {params.url} with status {response.status}"
+         )
+
+     async def _on_request_exception(self, session, trace_config_ctx, params):
+         """Callback fired when a request raises an exception."""
+         exc = params.exception
+         self.logger.warning(
+             f"Failed: {params.method} {params.url} with exception {type(exc).__name__}: {exc}"
+         )
+
+     async def close(self) -> None:
+         """Release the session resources"""
+         if self.session and not self.session.closed:
+             self.logger.info("Closing AioHttpDownloader session...")
+             await self.session.close()
              self.logger.debug("AioHttpDownloader closed.")
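The proxy handling in _send_request above is the part most likely to trip up middleware authors: a proxy may arrive as a plain string or as a dict, and inline credentials have to be split out into an aiohttp BasicAuth. A standalone sketch of that normalisation (a hypothetical helper written for illustration, not exported by crawlo) behaves like this:

# Hypothetical helper mirroring the proxy normalisation in
# AioHttpDownloader._send_request; it is not part of the crawlo API.
from typing import Optional, Tuple, Union

from aiohttp import BasicAuth
from yarl import URL


def normalize_proxy(proxy: Union[str, dict, URL]) -> Tuple[str, Optional[BasicAuth]]:
    """Return (proxy_url, proxy_auth) in the shape aiohttp's proxy/proxy_auth kwargs expect."""
    if isinstance(proxy, dict):
        # Dict form {"http": "...", "https": "..."}: prefer https, fall back to http.
        proxy = proxy.get("https") or proxy.get("http")
    proxy_url = URL(proxy)
    if proxy_url.scheme not in ("http", "https"):
        raise ValueError(f"Unsupported proxy scheme: {proxy_url.scheme}")
    if proxy_url.user and proxy_url.password:
        # Move inline credentials into an explicit BasicAuth object.
        return str(proxy_url.with_user(None)), BasicAuth(proxy_url.user, proxy_url.password)
    return str(proxy_url), None


# Example: a dict proxy with inline credentials.
url, auth = normalize_proxy({"https": "http://user:secret@10.0.0.1:8080"})
# url == "http://10.0.0.1:8080", auth == BasicAuth("user", "secret")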