crawlo 1.0.9-py3-none-any.whl → 1.1.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of crawlo might be problematic.

Files changed (111)
  1. crawlo/__init__.py +33 -24
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -106
  6. crawlo/commands/genspider.py +125 -110
  7. crawlo/commands/list.py +147 -92
  8. crawlo/commands/run.py +286 -181
  9. crawlo/commands/startproject.py +111 -101
  10. crawlo/commands/stats.py +188 -59
  11. crawlo/core/__init__.py +2 -2
  12. crawlo/core/engine.py +158 -158
  13. crawlo/core/processor.py +40 -40
  14. crawlo/core/scheduler.py +57 -57
  15. crawlo/crawler.py +494 -492
  16. crawlo/downloader/__init__.py +78 -78
  17. crawlo/downloader/aiohttp_downloader.py +199 -199
  18. crawlo/downloader/cffi_downloader.py +242 -277
  19. crawlo/downloader/httpx_downloader.py +246 -246
  20. crawlo/event.py +11 -11
  21. crawlo/exceptions.py +78 -78
  22. crawlo/extension/__init__.py +31 -31
  23. crawlo/extension/log_interval.py +49 -49
  24. crawlo/extension/log_stats.py +44 -44
  25. crawlo/extension/logging_extension.py +34 -34
  26. crawlo/filters/__init__.py +37 -37
  27. crawlo/filters/aioredis_filter.py +150 -150
  28. crawlo/filters/memory_filter.py +202 -202
  29. crawlo/items/__init__.py +23 -23
  30. crawlo/items/base.py +21 -21
  31. crawlo/items/fields.py +53 -53
  32. crawlo/items/items.py +104 -104
  33. crawlo/middleware/__init__.py +21 -21
  34. crawlo/middleware/default_header.py +32 -32
  35. crawlo/middleware/download_delay.py +28 -28
  36. crawlo/middleware/middleware_manager.py +135 -135
  37. crawlo/middleware/proxy.py +245 -245
  38. crawlo/middleware/request_ignore.py +30 -30
  39. crawlo/middleware/response_code.py +18 -18
  40. crawlo/middleware/response_filter.py +26 -26
  41. crawlo/middleware/retry.py +90 -90
  42. crawlo/network/__init__.py +7 -7
  43. crawlo/network/request.py +203 -203
  44. crawlo/network/response.py +166 -166
  45. crawlo/pipelines/__init__.py +13 -13
  46. crawlo/pipelines/console_pipeline.py +39 -39
  47. crawlo/pipelines/mongo_pipeline.py +116 -116
  48. crawlo/pipelines/mysql_batch_pipline.py +272 -272
  49. crawlo/pipelines/mysql_pipeline.py +195 -195
  50. crawlo/pipelines/pipeline_manager.py +56 -56
  51. crawlo/project.py +153 -0
  52. crawlo/settings/__init__.py +7 -7
  53. crawlo/settings/default_settings.py +166 -168
  54. crawlo/settings/setting_manager.py +99 -99
  55. crawlo/spider/__init__.py +129 -129
  56. crawlo/stats_collector.py +59 -59
  57. crawlo/subscriber.py +106 -106
  58. crawlo/task_manager.py +27 -27
  59. crawlo/templates/crawlo.cfg.tmpl +10 -10
  60. crawlo/templates/project/__init__.py.tmpl +3 -3
  61. crawlo/templates/project/items.py.tmpl +17 -17
  62. crawlo/templates/project/middlewares.py.tmpl +75 -75
  63. crawlo/templates/project/pipelines.py.tmpl +63 -63
  64. crawlo/templates/project/settings.py.tmpl +54 -54
  65. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  66. crawlo/templates/spider/spider.py.tmpl +31 -31
  67. crawlo/utils/__init__.py +7 -7
  68. crawlo/utils/date_tools.py +233 -233
  69. crawlo/utils/db_helper.py +343 -343
  70. crawlo/utils/func_tools.py +82 -82
  71. crawlo/utils/log.py +128 -128
  72. crawlo/utils/pqueue.py +173 -173
  73. crawlo/utils/request.py +267 -267
  74. crawlo/utils/spider_loader.py +62 -62
  75. crawlo/utils/system.py +11 -11
  76. crawlo/utils/tools.py +4 -4
  77. crawlo/utils/url.py +39 -39
  78. crawlo-1.1.1.dist-info/METADATA +220 -0
  79. crawlo-1.1.1.dist-info/RECORD +100 -0
  80. examples/__init__.py +7 -0
  81. examples/baidu_spider/__init__.py +7 -0
  82. examples/baidu_spider/demo.py +94 -0
  83. examples/baidu_spider/items.py +46 -0
  84. examples/baidu_spider/middleware.py +49 -0
  85. examples/baidu_spider/pipeline.py +55 -0
  86. examples/baidu_spider/run.py +27 -0
  87. examples/baidu_spider/settings.py +121 -0
  88. examples/baidu_spider/spiders/__init__.py +7 -0
  89. examples/baidu_spider/spiders/bai_du.py +61 -0
  90. examples/baidu_spider/spiders/miit.py +159 -0
  91. examples/baidu_spider/spiders/sina.py +79 -0
  92. tests/__init__.py +7 -7
  93. tests/test_proxy_health_check.py +32 -32
  94. tests/test_proxy_middleware_integration.py +136 -136
  95. tests/test_proxy_providers.py +56 -56
  96. tests/test_proxy_stats.py +19 -19
  97. tests/test_proxy_strategies.py +59 -59
  98. crawlo/utils/concurrency_manager.py +0 -125
  99. crawlo/utils/project.py +0 -197
  100. crawlo-1.0.9.dist-info/METADATA +0 -49
  101. crawlo-1.0.9.dist-info/RECORD +0 -97
  102. examples/gxb/__init__.py +0 -0
  103. examples/gxb/items.py +0 -36
  104. examples/gxb/run.py +0 -16
  105. examples/gxb/settings.py +0 -72
  106. examples/gxb/spider/__init__.py +0 -0
  107. examples/gxb/spider/miit_spider.py +0 -180
  108. examples/gxb/spider/telecom_device.py +0 -129
  109. {crawlo-1.0.9.dist-info → crawlo-1.1.1.dist-info}/WHEEL +0 -0
  110. {crawlo-1.0.9.dist-info → crawlo-1.1.1.dist-info}/entry_points.txt +0 -0
  111. {crawlo-1.0.9.dist-info → crawlo-1.1.1.dist-info}/top_level.txt +0 -0
crawlo/middleware/retry.py CHANGED
@@ -1,90 +1,90 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from typing import List
- from anyio import EndOfStream
- from httpcore import ReadError
- from asyncio.exceptions import TimeoutError
- from httpx import RemoteProtocolError, ConnectError, ReadTimeout
- from aiohttp.client_exceptions import ClientConnectionError, ClientPayloadError
- from aiohttp import ClientConnectorError, ClientTimeout, ClientConnectorSSLError, ClientResponseError
-
- from crawlo.utils.log import get_logger
- from crawlo.stats_collector import StatsCollector
-
- _retry_exceptions = [
-     EndOfStream,
-     ReadError,
-     TimeoutError,
-     ConnectError,
-     ReadTimeout,
-     ClientConnectorError,
-     ClientResponseError,
-     RemoteProtocolError,
-     ClientTimeout,
-     ClientConnectorSSLError,
-     ClientPayloadError,
-     ClientConnectionError
- ]
-
-
- class RetryMiddleware(object):
-
-     def __init__(
-             self,
-             *,
-             retry_http_codes: List,
-             ignore_http_codes: List,
-             max_retry_times: int,
-             retry_exceptions: List,
-             stats: StatsCollector,
-             retry_priority: int
-     ):
-         self.retry_http_codes = retry_http_codes
-         self.ignore_http_codes = ignore_http_codes
-         self.max_retry_times = max_retry_times
-         self.retry_exceptions = tuple(retry_exceptions + _retry_exceptions)
-         self.retry_priority = retry_priority
-         self.stats = stats
-         self.logger = get_logger(self.__class__.__name__)
-
-     @classmethod
-     def create_instance(cls, crawler):
-         o = cls(
-             retry_http_codes=crawler.settings.get_list('RETRY_HTTP_CODES'),
-             ignore_http_codes=crawler.settings.get_list('IGNORE_HTTP_CODES'),
-             max_retry_times=crawler.settings.get_int('MAX_RETRY_TIMES'),
-             retry_exceptions=crawler.settings.get_list('RETRY_EXCEPTIONS'),
-             stats=crawler.stats,
-             retry_priority=crawler.settings.get_int('RETRY_PRIORITY')
-         )
-         return o
-
-     def process_response(self, request, response, spider):
-         if request.meta.get('dont_retry', False):
-             return response
-         if response.status_code in self.ignore_http_codes:
-             return response
-         if response.status_code in self.retry_http_codes:
-             # Retry logic
-             reason = f"response code {response.status_code}"
-             return self._retry(request, reason, spider) or response
-         return response
-
-     def process_exception(self, request, exc, spider):
-         if isinstance(exc, self.retry_exceptions) and not request.meta.get('dont_retry', False):
-             return self._retry(request=request, reason=type(exc).__name__, spider=spider)
-
-     def _retry(self, request, reason, spider):
-         retry_times = request.meta.get('retry_times', 0)
-         if retry_times < self.max_retry_times:
-             retry_times += 1
-             self.logger.info(f"{spider} {request} {reason} retrying {retry_times} time...")
-             request.meta['retry_times'] = retry_times
-             # request.dont_retry = True
-             request.meta['dont_retry'] = True
-             request.priority = request.priority + self.retry_priority
-             self.stats.inc_value("retry_count")
-             return request
-         else:
-             self.logger.warning(f"{spider} {request} {reason} retry max {self.max_retry_times} times, give up.")
-             return None
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from typing import List
+ from anyio import EndOfStream
+ from httpcore import ReadError
+ from asyncio.exceptions import TimeoutError
+ from httpx import RemoteProtocolError, ConnectError, ReadTimeout
+ from aiohttp.client_exceptions import ClientConnectionError, ClientPayloadError
+ from aiohttp import ClientConnectorError, ClientTimeout, ClientConnectorSSLError, ClientResponseError
+
+ from crawlo.utils.log import get_logger
+ from crawlo.stats_collector import StatsCollector
+
+ _retry_exceptions = [
+     EndOfStream,
+     ReadError,
+     TimeoutError,
+     ConnectError,
+     ReadTimeout,
+     ClientConnectorError,
+     ClientResponseError,
+     RemoteProtocolError,
+     ClientTimeout,
+     ClientConnectorSSLError,
+     ClientPayloadError,
+     ClientConnectionError
+ ]
+
+
+ class RetryMiddleware(object):
+
+     def __init__(
+             self,
+             *,
+             retry_http_codes: List,
+             ignore_http_codes: List,
+             max_retry_times: int,
+             retry_exceptions: List,
+             stats: StatsCollector,
+             retry_priority: int
+     ):
+         self.retry_http_codes = retry_http_codes
+         self.ignore_http_codes = ignore_http_codes
+         self.max_retry_times = max_retry_times
+         self.retry_exceptions = tuple(retry_exceptions + _retry_exceptions)
+         self.retry_priority = retry_priority
+         self.stats = stats
+         self.logger = get_logger(self.__class__.__name__)
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(
+             retry_http_codes=crawler.settings.get_list('RETRY_HTTP_CODES'),
+             ignore_http_codes=crawler.settings.get_list('IGNORE_HTTP_CODES'),
+             max_retry_times=crawler.settings.get_int('MAX_RETRY_TIMES'),
+             retry_exceptions=crawler.settings.get_list('RETRY_EXCEPTIONS'),
+             stats=crawler.stats,
+             retry_priority=crawler.settings.get_int('RETRY_PRIORITY')
+         )
+         return o
+
+     def process_response(self, request, response, spider):
+         if request.meta.get('dont_retry', False):
+             return response
+         if response.status_code in self.ignore_http_codes:
+             return response
+         if response.status_code in self.retry_http_codes:
+             # Retry logic
+             reason = f"response code {response.status_code}"
+             return self._retry(request, reason, spider) or response
+         return response
+
+     def process_exception(self, request, exc, spider):
+         if isinstance(exc, self.retry_exceptions) and not request.meta.get('dont_retry', False):
+             return self._retry(request=request, reason=type(exc).__name__, spider=spider)
+
+     def _retry(self, request, reason, spider):
+         retry_times = request.meta.get('retry_times', 0)
+         if retry_times < self.max_retry_times:
+             retry_times += 1
+             self.logger.info(f"{spider} {request} {reason} retrying {retry_times} time...")
+             request.meta['retry_times'] = retry_times
+             # request.dont_retry = True
+             request.meta['dont_retry'] = True
+             request.priority = request.priority + self.retry_priority
+             self.stats.inc_value("retry_count")
+             return request
+         else:
+             self.logger.warning(f"{spider} {request} {reason} retry max {self.max_retry_times} times, give up.")
+             return None
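
The create_instance hook above wires the middleware entirely from crawler settings, so enabling retries is a matter of defining those keys in a project's settings module. A minimal configuration sketch follows: the key names are the ones create_instance reads above, while the values are illustrative assumptions, not crawlo defaults.

# Hypothetical settings.py values; only the key names come from create_instance() above.
RETRY_HTTP_CODES = [429, 500, 502, 503, 504]  # status codes that trigger _retry()
IGNORE_HTTP_CODES = [404]                     # status codes returned as-is, never retried
MAX_RETRY_TIMES = 3                           # upper bound checked in _retry()
RETRY_EXCEPTIONS = []                         # extra exception types, merged with the built-in _retry_exceptions
RETRY_PRIORITY = 10                           # added to request.priority on each retry

# A single request can opt out of retrying through its meta dict:
# request.meta['dont_retry'] = True

Exception-driven retries take the process_exception path, which consults the same dont_retry flag and the merged exception tuple before re-queuing the request.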
@@ -1,7 +1,7 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- """
- # @Time : 2025-02-05 14:07
- # @Author : oscar
- # @Desc : None
- """
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ # @Time : 2025-02-05 14:07
+ # @Author : oscar
+ # @Desc : None
+ """
crawlo/network/request.py CHANGED
@@ -1,204 +1,204 @@
- #!/usr/bin/python
- # -*- coding: UTF-8 -*-
- import json
- from copy import deepcopy
- from urllib.parse import urlencode
- from w3lib.url import safe_url_string
- from typing import Dict, Optional, Callable, Union, Any, TypeVar, List
-
- from crawlo.utils.url import escape_ajax
-
-
- _Request = TypeVar("_Request", bound="Request")
-
-
- class RequestPriority:
-     """Request priority constants"""
-     HIGH = -100
-     NORMAL = 0
-     LOW = 100
-
-
- class Request:
-     """
-     Encapsulates an HTTP request object that represents a pending crawl task in the crawler framework.
-     Supports JSON, form, and raw body submission, handling Content-Type and encoding automatically.
-     File uploads (multipart/form-data) are not supported, keeping the class lightweight.
-     """
-
-     __slots__ = (
-         '_url',
-         '_meta',
-         'callback',
-         'cb_kwargs',
-         'err_back',
-         'headers',
-         'body',
-         'method',
-         'cookies',
-         'priority',
-         'encoding',
-         'dont_filter',
-         'timeout',
-         'proxy',
-         'allow_redirects',
-         'auth',
-         'verify',
-         'flags',
-         '_json_body',
-         '_form_data'
-     )
-
-     def __init__(
-             self,
-             url: str,
-             callback: Optional[Callable] = None,
-             method: Optional[str] = 'GET',
-             headers: Optional[Dict[str, str]] = None,
-             body: Optional[Union[bytes, str, Dict[Any, Any]]] = None,
-             form_data: Optional[Dict[Any, Any]] = None,
-             json_body: Optional[Dict[Any, Any]] = None,
-             cb_kwargs: Optional[Dict[str, Any]] = None,
-             cookies: Optional[Dict[str, str]] = None,
-             meta: Optional[Dict[str, Any]] = None,
-             priority: int = RequestPriority.NORMAL,
-             dont_filter: bool = False,
-             timeout: Optional[float] = None,
-             proxy: Optional[str] = None,
-             allow_redirects: bool = True,
-             auth: Optional[tuple] = None,
-             verify: bool = True,
-             flags: Optional[List[str]] = None,
-             encoding: str = 'utf-8'
-     ):
-         """
-         Initialize the request object.
-
-         :param url: request URL (required)
-         :param callback: callback invoked on success
-         :param method: HTTP method, defaults to GET
-         :param headers: request headers
-         :param body: raw request body (bytes/str); a dict is converted to JSON automatically when json_body/form_data are not used
-         :param form_data: form data, automatically encoded as application/x-www-form-urlencoded
-         :param json_body: JSON data, automatically serialized with the Content-Type set
-         :param cb_kwargs: extra keyword arguments passed to the callback
-         :param cookies: cookies dict
-         :param meta: metadata (for passing data across middlewares)
-         :param priority: priority (smaller values are scheduled first)
-         :param dont_filter: whether to skip deduplication
-         :param timeout: timeout in seconds
-         :param proxy: proxy address, e.g. http://127.0.0.1:8080
-         :param allow_redirects: whether to follow redirects
-         :param auth: auth tuple (username, password)
-         :param verify: whether to verify SSL certificates
-         :param flags: flags (for debugging or categorization)
-         :param encoding: character encoding, defaults to utf-8
-         """
-         self.callback = callback
-         self.method = str(method).upper()
-         self.headers = headers or {}
-         self.cookies = cookies or {}
-         self.priority = -priority  # for ordering: the smaller the value, the higher the priority
-         self._meta = deepcopy(meta) if meta is not None else {}
-         self.timeout = self._meta.get('download_timeout', timeout)
-         self.proxy = proxy
-         self.allow_redirects = allow_redirects
-         self.auth = auth
-         self.verify = verify
-         self.flags = flags or []
-         self.encoding = encoding
-         self.cb_kwargs = cb_kwargs or {}
-         self.body = body
-         # keep the high-level semantic params (used by copy)
-         self._json_body = json_body
-         self._form_data = form_data
-
-         # build the body
-         if json_body is not None:
-             if 'Content-Type' not in self.headers:
-                 self.headers['Content-Type'] = 'application/json'
-             self.body = json.dumps(json_body, ensure_ascii=False).encode(encoding)
-             if self.method == 'GET':
-                 self.method = 'POST'
-
-         elif form_data is not None:
-             if self.method == 'GET':
-                 self.method = 'POST'
-             if 'Content-Type' not in self.headers:
-                 self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
-             query_str = urlencode(form_data)
-             self.body = query_str.encode(encoding)  # ✅ explicitly encode to bytes
-
-
-         else:
-             # handle a raw body
-             if isinstance(self.body, dict):
-                 if 'Content-Type' not in self.headers:
-                     self.headers['Content-Type'] = 'application/json'
-                 self.body = json.dumps(self.body, ensure_ascii=False).encode(encoding)
-             elif isinstance(self.body, str):
-                 self.body = self.body.encode(encoding)
-
-         self.dont_filter = dont_filter
-         self._set_url(url)
-
-     def copy(self: _Request) -> _Request:
-         """
-         Create a copy of this request, preserving all high-level semantics (json_body/form_data).
-         """
-         return type(self)(
-             url=self.url,
-             callback=self.callback,
-             method=self.method,
-             headers=self.headers.copy(),
-             body=None,  # regenerated from form_data/json_body
-             form_data=self._form_data,
-             json_body=self._json_body,
-             cb_kwargs=deepcopy(self.cb_kwargs),
-             err_back=self.err_back,
-             cookies=self.cookies.copy(),
-             meta=deepcopy(self._meta),
-             priority=-self.priority,
-             dont_filter=self.dont_filter,
-             timeout=self.timeout,
-             proxy=self.proxy,
-             allow_redirects=self.allow_redirects,
-             auth=self.auth,
-             verify=self.verify,
-             flags=self.flags.copy(),
-             encoding=self.encoding
-         )
-
-     def set_meta(self, key: str, value: Any) -> None:
-         """Set a key in meta to the given value."""
-         self._meta[key] = value
-
-     def _set_url(self, url: str) -> None:
-         """Safely set the URL, ensuring it is well-formed."""
-         if not isinstance(url, str):
-             raise TypeError(f"Request url must be a string, got: {type(url).__name__}")
-
-         s = safe_url_string(url, self.encoding)
-         escaped_url = escape_ajax(s)
-         self._url = escaped_url
-
-         if not self._url.startswith(('http://', 'https://')):
-             raise ValueError(f"URL is missing a scheme: {self._url}")
-
-     @property
-     def url(self) -> str:
-         return self._url
-
-     @property
-     def meta(self) -> Dict[str, Any]:
-         return self._meta
-
-     def __str__(self) -> str:
-         return f'<Request url={self.url} method={self.method}>'
-
-     def __repr__(self) -> str:
-         return str(self)
-
-     def __lt__(self, other: _Request) -> bool:
-         """Used for ordering by priority"""
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ import json
+ from copy import deepcopy
+ from urllib.parse import urlencode
+ from w3lib.url import safe_url_string
+ from typing import Dict, Optional, Callable, Union, Any, TypeVar, List
+
+ from crawlo.utils.url import escape_ajax
+
+
+ _Request = TypeVar("_Request", bound="Request")
+
+
+ class RequestPriority:
+     """Request priority constants"""
+     HIGH = -100
+     NORMAL = 0
+     LOW = 100
+
+
+ class Request:
+     """
+     Encapsulates an HTTP request object that represents a pending crawl task in the crawler framework.
+     Supports JSON, form, and raw body submission, handling Content-Type and encoding automatically.
+     File uploads (multipart/form-data) are not supported, keeping the class lightweight.
+     """
+
+     __slots__ = (
+         '_url',
+         '_meta',
+         'callback',
+         'cb_kwargs',
+         'err_back',
+         'headers',
+         'body',
+         'method',
+         'cookies',
+         'priority',
+         'encoding',
+         'dont_filter',
+         'timeout',
+         'proxy',
+         'allow_redirects',
+         'auth',
+         'verify',
+         'flags',
+         '_json_body',
+         '_form_data'
+     )
+
+     def __init__(
+             self,
+             url: str,
+             callback: Optional[Callable] = None,
+             method: Optional[str] = 'GET',
+             headers: Optional[Dict[str, str]] = None,
+             body: Optional[Union[bytes, str, Dict[Any, Any]]] = None,
+             form_data: Optional[Dict[Any, Any]] = None,
+             json_body: Optional[Dict[Any, Any]] = None,
+             cb_kwargs: Optional[Dict[str, Any]] = None,
+             cookies: Optional[Dict[str, str]] = None,
+             meta: Optional[Dict[str, Any]] = None,
+             priority: int = RequestPriority.NORMAL,
+             dont_filter: bool = False,
+             timeout: Optional[float] = None,
+             proxy: Optional[str] = None,
+             allow_redirects: bool = True,
+             auth: Optional[tuple] = None,
+             verify: bool = True,
+             flags: Optional[List[str]] = None,
+             encoding: str = 'utf-8'
+     ):
+         """
+         Initialize the request object.
+
+         :param url: request URL (required)
+         :param callback: callback invoked on success
+         :param method: HTTP method, defaults to GET
+         :param headers: request headers
+         :param body: raw request body (bytes/str); a dict is converted to JSON automatically when json_body/form_data are not used
+         :param form_data: form data, automatically encoded as application/x-www-form-urlencoded
+         :param json_body: JSON data, automatically serialized with the Content-Type set
+         :param cb_kwargs: extra keyword arguments passed to the callback
+         :param cookies: cookies dict
+         :param meta: metadata (for passing data across middlewares)
+         :param priority: priority (smaller values are scheduled first)
+         :param dont_filter: whether to skip deduplication
+         :param timeout: timeout in seconds
+         :param proxy: proxy address, e.g. http://127.0.0.1:8080
+         :param allow_redirects: whether to follow redirects
+         :param auth: auth tuple (username, password)
+         :param verify: whether to verify SSL certificates
+         :param flags: flags (for debugging or categorization)
+         :param encoding: character encoding, defaults to utf-8
+         """
+         self.callback = callback
+         self.method = str(method).upper()
+         self.headers = headers or {}
+         self.cookies = cookies or {}
+         self.priority = -priority  # for ordering: the smaller the value, the higher the priority
+         self._meta = deepcopy(meta) if meta is not None else {}
+         self.timeout = self._meta.get('download_timeout', timeout)
+         self.proxy = proxy
+         self.allow_redirects = allow_redirects
+         self.auth = auth
+         self.verify = verify
+         self.flags = flags or []
+         self.encoding = encoding
+         self.cb_kwargs = cb_kwargs or {}
+         self.body = body
+         # keep the high-level semantic params (used by copy)
+         self._json_body = json_body
+         self._form_data = form_data
+
+         # build the body
+         if json_body is not None:
+             if 'Content-Type' not in self.headers:
+                 self.headers['Content-Type'] = 'application/json'
+             self.body = json.dumps(json_body, ensure_ascii=False).encode(encoding)
+             if self.method == 'GET':
+                 self.method = 'POST'
+
+         elif form_data is not None:
+             if self.method == 'GET':
+                 self.method = 'POST'
+             if 'Content-Type' not in self.headers:
+                 self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
+             query_str = urlencode(form_data)
+             self.body = query_str.encode(encoding)  # ✅ explicitly encode to bytes
+
+
+         else:
+             # handle a raw body
+             if isinstance(self.body, dict):
+                 if 'Content-Type' not in self.headers:
+                     self.headers['Content-Type'] = 'application/json'
+                 self.body = json.dumps(self.body, ensure_ascii=False).encode(encoding)
+             elif isinstance(self.body, str):
+                 self.body = self.body.encode(encoding)
+
+         self.dont_filter = dont_filter
+         self._set_url(url)
+
+     def copy(self: _Request) -> _Request:
+         """
+         Create a copy of this request, preserving all high-level semantics (json_body/form_data).
+         """
+         return type(self)(
+             url=self.url,
+             callback=self.callback,
+             method=self.method,
+             headers=self.headers.copy(),
+             body=None,  # regenerated from form_data/json_body
+             form_data=self._form_data,
+             json_body=self._json_body,
+             cb_kwargs=deepcopy(self.cb_kwargs),
+             err_back=self.err_back,
+             cookies=self.cookies.copy(),
+             meta=deepcopy(self._meta),
+             priority=-self.priority,
+             dont_filter=self.dont_filter,
+             timeout=self.timeout,
+             proxy=self.proxy,
+             allow_redirects=self.allow_redirects,
+             auth=self.auth,
+             verify=self.verify,
+             flags=self.flags.copy(),
+             encoding=self.encoding
+         )
+
+     def set_meta(self, key: str, value: Any) -> None:
+         """Set a key in meta to the given value."""
+         self._meta[key] = value
+
+     def _set_url(self, url: str) -> None:
+         """Safely set the URL, ensuring it is well-formed."""
+         if not isinstance(url, str):
+             raise TypeError(f"Request url must be a string, got: {type(url).__name__}")
+
+         s = safe_url_string(url, self.encoding)
+         escaped_url = escape_ajax(s)
+         self._url = escaped_url
+
+         if not self._url.startswith(('http://', 'https://')):
+             raise ValueError(f"URL is missing a scheme: {self._url}")
+
+     @property
+     def url(self) -> str:
+         return self._url
+
+     @property
+     def meta(self) -> Dict[str, Any]:
+         return self._meta
+
+     def __str__(self) -> str:
+         return f'<Request url={self.url} method={self.method}>'
+
+     def __repr__(self) -> str:
+         return str(self)
+
+     def __lt__(self, other: _Request) -> bool:
+         """Used for ordering by priority"""
          return self.priority < other.priority
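
A short usage sketch based only on the constructor shown above; the URLs and the parse callback are placeholders.

from crawlo.network.request import Request, RequestPriority


def parse(response):
    ...


# json_body: the dict is serialized to bytes, Content-Type defaults to
# application/json, and the method is promoted from GET to POST.
api_req = Request(
    'https://example.com/api',
    callback=parse,
    json_body={'query': 'crawlo'},
    priority=RequestPriority.HIGH,  # -100; stored negated, i.e. self.priority == 100
)
assert api_req.method == 'POST'
assert api_req.headers['Content-Type'] == 'application/json'

# form_data: urlencoded, explicitly encoded to bytes, GET promoted to POST.
form_req = Request('https://example.com/login', form_data={'user': 'oscar'})
assert form_req.method == 'POST'
assert form_req.body == b'user=oscar'

# A plain GET that skips deduplication.
page_req = Request('https://example.com/page', callback=parse, dont_filter=True)

Note that copy() rebuilds the body from the saved _form_data/_json_body rather than copying the encoded bytes, so a copied request keeps the high-level semantics of the original.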