crawlo 1.0.1__tar.gz → 1.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. crawlo-1.0.2/MANIFEST.in +17 -0
  2. {crawlo-1.0.1/crawlo.egg-info → crawlo-1.0.2}/PKG-INFO +23 -11
  3. crawlo-1.0.2/README.md +2 -0
  4. crawlo-1.0.2/crawlo/__version__.py +2 -0
  5. crawlo-1.0.2/crawlo/crawler.py +222 -0
  6. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/middleware/retry.py +2 -1
  7. crawlo-1.0.2/crawlo/network/request.py +234 -0
  8. crawlo-1.0.2/crawlo/network/response.py +162 -0
  9. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/pipelines/mysql_pipeline.py +38 -19
  10. crawlo-1.0.2/crawlo/subscriber.py +106 -0
  11. crawlo-1.0.2/crawlo/utils/concurrency_manager.py +125 -0
  12. crawlo-1.0.2/crawlo/utils/tools.py +303 -0
  13. {crawlo-1.0.1 → crawlo-1.0.2/crawlo.egg-info}/PKG-INFO +23 -11
  14. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo.egg-info/SOURCES.txt +3 -0
  15. crawlo-1.0.2/crawlo.egg-info/requires.txt +34 -0
  16. crawlo-1.0.2/pyproject.toml +3 -0
  17. crawlo-1.0.2/requirements.txt +21 -0
  18. {crawlo-1.0.1 → crawlo-1.0.2}/setup.cfg +22 -7
  19. crawlo-1.0.1/MANIFEST.in +0 -11
  20. crawlo-1.0.1/README.md +0 -2
  21. crawlo-1.0.1/crawlo/__version__.py +0 -2
  22. crawlo-1.0.1/crawlo/crawler.py +0 -107
  23. crawlo-1.0.1/crawlo/network/request.py +0 -155
  24. crawlo-1.0.1/crawlo/network/response.py +0 -93
  25. crawlo-1.0.1/crawlo/subscriber.py +0 -27
  26. crawlo-1.0.1/crawlo.egg-info/requires.txt +0 -20
  27. crawlo-1.0.1/pyproject.toml +0 -6
  28. {crawlo-1.0.1 → crawlo-1.0.2}/LICENSE +0 -0
  29. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/__init__.py +0 -0
  30. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/core/__init__.py +0 -0
  31. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/core/engine.py +0 -0
  32. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/core/processor.py +0 -0
  33. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/core/scheduler.py +0 -0
  34. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/downloader/__init__.py +0 -0
  35. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/downloader/aiohttp_downloader.py +0 -0
  36. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/downloader/httpx_downloader.py +0 -0
  37. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/downloader/playwright_downloader.py +0 -0
  38. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/event.py +0 -0
  39. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/exceptions.py +0 -0
  40. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/extension/__init__.py +0 -0
  41. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/extension/log_interval.py +0 -0
  42. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/extension/log_stats.py +0 -0
  43. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/filters/__init__.py +0 -0
  44. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/filters/aioredis_filter.py +0 -0
  45. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/filters/memory_filter.py +0 -0
  46. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/filters/redis_filter.py +0 -0
  47. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/items/__init__.py +0 -0
  48. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/items/items.py +0 -0
  49. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/middleware/__init__.py +0 -0
  50. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/middleware/default_header.py +0 -0
  51. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/middleware/download_delay.py +0 -0
  52. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/middleware/middleware_manager.py +0 -0
  53. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/middleware/request_ignore.py +0 -0
  54. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/middleware/response_code.py +0 -0
  55. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/middleware/response_filter.py +0 -0
  56. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/network/__init__.py +0 -0
  57. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/pipelines/__init__.py +0 -0
  58. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/pipelines/console_pipeline.py +0 -0
  59. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/pipelines/mongo_pipeline.py +0 -0
  60. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/pipelines/mysql_batch_pipline.py +0 -0
  61. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/pipelines/pipeline_manager.py +0 -0
  62. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/settings/__init__.py +0 -0
  63. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/settings/default_settings.py +0 -0
  64. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/settings/setting_manager.py +0 -0
  65. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/spider/__init__.py +0 -0
  66. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/stats_collector.py +0 -0
  67. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/task_manager.py +0 -0
  68. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/templates/item_template.tmpl +0 -0
  69. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/templates/project_template/items/__init__.py +0 -0
  70. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/templates/project_template/main.py +0 -0
  71. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/templates/project_template/setting.py +0 -0
  72. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/templates/project_template/spiders/__init__.py +0 -0
  73. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/templates/spider_template.tmpl +0 -0
  74. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/utils/__init__.py +0 -0
  75. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/utils/date_tools.py +0 -0
  76. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/utils/func_tools.py +0 -0
  77. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/utils/log.py +0 -0
  78. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/utils/pqueue.py +0 -0
  79. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/utils/project.py +0 -0
  80. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/utils/request.py +0 -0
  81. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/utils/system.py +0 -0
  82. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo/utils/url.py +0 -0
  83. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo.egg-info/dependency_links.txt +0 -0
  84. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo.egg-info/entry_points.txt +0 -0
  85. {crawlo-1.0.1 → crawlo-1.0.2}/crawlo.egg-info/top_level.txt +0 -0
@@ -0,0 +1,17 @@
+ include README.md
+ include LICENSE
+ include requirements.txt # if there is a global requirements.txt in the project root
+ include VERSION # if there is a global VERSION file in the project root
+
+ # files included inside the package
+ recursive-include crawlo/utils/js *
+ recursive-include crawlo/templates *
+
+ # test files (include them if tests should ship in the distribution)
+ recursive-include tests *
+
+ # exclusions
+ global-exclude __pycache__ *.py[cod] .DS_Store *.so
+ global-exclude *.bak *.swp *.orig *.rej
+ prune samples # exclude the samples directory
+ prune docs # exclude the docs directory
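Since this MANIFEST.in decides what lands in the sdist, one quick sanity check is to list the members of a built archive. A minimal sketch, assuming the archive was produced with `python -m build --sdist` and sits at dist/crawlo-1.0.2.tar.gz (both the build step and the path are assumptions):

# Sketch: verify the MANIFEST.in rules against a built sdist (path below is an assumption).
import tarfile

SDIST = "dist/crawlo-1.0.2.tar.gz"

with tarfile.open(SDIST, "r:gz") as tar:
    names = tar.getnames()

print(any("crawlo/templates" in name for name in names))  # templates included -> expect True
print(any(name.endswith(".pyc") for name in names))       # *.py[cod] excluded -> expect False
print(any("/docs/" in name for name in names))            # docs pruned -> expect False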
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: crawlo
- Version: 1.0.1
- Summary: Crawlo is an async-capable Python crawler framework
+ Version: 1.0.2
+ Summary: Crawlo is a high-performance, asyncio-based Python crawler framework with support for distributed crawling.
  Home-page: https://github.com/crawl-coder/Crawlo.git
  Author: crawl-coder
  Author-email: crawlo@qq.com
@@ -11,13 +11,26 @@ Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
  Requires-Python: >=3.6
  Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: aiohttp>=3.12.6
- Requires-Dist: httpx>=0.28.1
- Requires-Dist: DBUtils>=2.0
- Requires-Dist: parsel>=1.10.0
- Requires-Dist: pymysql>=1.1.1
- Requires-Dist: ujson>=5.10.0
+ Requires-Dist: aiohttp>=3.12.14
+ Requires-Dist: aiomysql>=0.2.0
+ Requires-Dist: aioredis>=2.0.1
+ Requires-Dist: asyncmy>=0.2.10
+ Requires-Dist: cssselect>=1.2.0
+ Requires-Dist: dateparser>=1.2.2
+ Requires-Dist: httpx>=0.27.0
+ Requires-Dist: lxml>=5.2.1
+ Requires-Dist: motor>=3.7.0
+ Requires-Dist: parsel>=1.9.1
+ Requires-Dist: pydantic>=2.11.7
+ Requires-Dist: pymongo>=4.11
+ Requires-Dist: PyMySQL>=1.1.1
+ Requires-Dist: python-dateutil>=2.9.0.post0
+ Requires-Dist: redis>=6.2.0
+ Requires-Dist: requests>=2.32.4
+ Requires-Dist: six>=1.17.0
+ Requires-Dist: ujson>=5.9.0
+ Requires-Dist: urllib3>=2.5.0
+ Requires-Dist: w3lib>=2.1.2
  Provides-Extra: render
  Requires-Dist: webdriver-manager>=4.0.0; extra == "render"
  Requires-Dist: playwright; extra == "render"
@@ -30,7 +43,6 @@ Requires-Dist: redis-py-cluster>=2.1.0; extra == "all"
  Requires-Dist: webdriver-manager>=4.0.0; extra == "all"
  Requires-Dist: playwright; extra == "all"
  Requires-Dist: selenium>=3.141.0; extra == "all"
- Dynamic: license-file
  
  # Crawlo
- A general-purpose asynchronous crawler framework
+ Crawlo is a high-performance, asyncio-based Python crawler framework with support for distributed crawling and data pipelines.
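The dependency list above replaces the short 1.0.1 set while keeping the render and all extras. A small sketch for checking what an installed copy actually declares, using only the standard library (assumes crawlo 1.0.2 is installed in the current environment):

# Sketch: read the installed distribution's metadata; mirrors the Requires-Dist entries above.
from importlib.metadata import requires, version

print(version("crawlo"))               # expect "1.0.2"
for requirement in requires("crawlo") or []:
    print(requirement)                 # core deps plus the 'render' and 'all' extras markers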
crawlo-1.0.2/README.md ADDED
@@ -0,0 +1,2 @@
+ # Crawlo
+ Crawlo is a high-performance, asyncio-based Python crawler framework with support for distributed crawling and data pipelines.
@@ -0,0 +1,2 @@
+
+ __version__ = "1.0.2"
@@ -0,0 +1,222 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import signal
+ import asyncio
+ from typing import Type, Final, Set, Optional
+
+ from crawlo.spider import Spider
+ from crawlo.core.engine import Engine
+ from crawlo.utils.log import get_logger
+ from crawlo.subscriber import Subscriber
+ from crawlo.extension import ExtensionManager
+ from crawlo.exceptions import SpiderTypeError
+ from crawlo.stats_collector import StatsCollector
+ from crawlo.event import spider_opened, spider_closed
+ from crawlo.settings.setting_manager import SettingManager
+ from crawlo.utils.project import merge_settings, get_settings
+ from crawlo.utils.concurrency_manager import calculate_optimal_concurrency
+
+ logger = get_logger(__name__)
+
+
+ class Crawler:
+
+     def __init__(self, spider_cls, settings):
+         self.spider_cls = spider_cls
+         self.spider: Optional[Spider] = None
+         self.engine: Optional[Engine] = None
+         self.stats: Optional[StatsCollector] = None
+         self.subscriber: Optional[Subscriber] = None
+         self.extension: Optional[ExtensionManager] = None
+         self.settings: SettingManager = settings.copy()
+
+     async def crawl(self):
+         self.subscriber = self._create_subscriber()
+         self.spider = self._create_spider()
+         self.engine = self._create_engine()
+         self.stats = self._create_stats()
+         self.extension = self._create_extension()
+
+         await self.engine.start_spider(self.spider)
+
+     @staticmethod
+     def _create_subscriber():
+         return Subscriber()
+
+     def _create_spider(self) -> Spider:
+         spider = self.spider_cls.create_instance(self)
+         self._set_spider(spider)
+         return spider
+
+     def _create_engine(self) -> Engine:
+         engine = Engine(self)
+         engine.engine_start()
+         return engine
+
+     def _create_stats(self) -> StatsCollector:
+         stats = StatsCollector(self)
+         return stats
+
+     def _create_extension(self) -> ExtensionManager:
+         extension = ExtensionManager.create_instance(self)
+         return extension
+
+     def _set_spider(self, spider):
+         self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
+         self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
+         merge_settings(spider, self.settings)
+
+     async def close(self, reason='finished') -> None:
+         await asyncio.create_task(self.subscriber.notify(spider_closed))
+         self.stats.close_spider(spider_name=self.spider, reason=reason)
+
+
+ class CrawlerProcess:
+     """Crawler process manager with cross-platform dynamic concurrency control and fine-grained logging."""
+
+     def __init__(self, settings=None, max_concurrency: Optional[int] = None, batch_size: int = 50):
+         self.crawlers: Final[Set] = set()
+         self._active_spiders: Final[Set] = set()
+         self.settings = settings or self._get_default_settings()
+         self.batch_size = batch_size
+
+         # Compute the maximum concurrency via the dedicated helper module
+         self.max_concurrency = calculate_optimal_concurrency(max_concurrency)
+         self.semaphore = asyncio.Semaphore(self.max_concurrency)
+
+         signal.signal(signal.SIGINT, self._shutdown)
+         logger.info(f"Initialized crawler process, max concurrency: {self.max_concurrency}")
+
+     async def crawl(self, spiders):
+         """Run a single spider class or a batch of spider classes, with streamlined log output."""
+         if not spiders:
+             raise ValueError("At least one spider class is required")
+
+         # Normalize the input to a list
+         if isinstance(spiders, type) and issubclass(spiders, Spider):
+             spiders = [spiders]
+         elif isinstance(spiders, (list, tuple)):
+             spiders = list(spiders)
+         else:
+             raise TypeError("spiders must be a spider class or a list/tuple of spider classes")
+
+         # Sort spiders by class name (ascending, case-insensitive)
+         spiders.sort(key=lambda x: x.__name__.lower())
+
+         if len(spiders) == 1:
+             logger.info(f"Starting spider: {spiders[0].__name__}")
+         else:
+             logger.info(f"Starting {len(spiders)} spiders, sorted by name and processed in batches")
+
+         batches = [spiders[i:i + self.batch_size] for i in range(0, len(spiders), self.batch_size)]
+
+         for batch_idx, batch in enumerate(batches):
+             batch_tasks = set()
+
+             for spider_cls in batch:
+                 crawler = self._create_crawler(spider_cls)
+                 self.crawlers.add(crawler)
+
+                 await self.semaphore.acquire()
+                 task = asyncio.create_task(self._run_crawler_with_semaphore(crawler))
+                 batch_tasks.add(task)
+                 self._active_spiders.add(task)
+
+             if len(spiders) > 1:  # only log batch info for multi-spider runs
+                 logger.info(f"Starting batch {batch_idx + 1}/{len(batches)} with {len(batch)} spiders")
+
+             await asyncio.gather(*batch_tasks)
+
+             if len(spiders) > 1:  # only log batch completion for multi-spider runs
+                 logger.info(f"Batch {batch_idx + 1} finished")
+
+     async def _run_crawler_with_semaphore(self, crawler):
+         """Run a crawler while holding a semaphore slot."""
+         try:
+             await crawler.crawl()
+         finally:
+             self.semaphore.release()  # always release the slot
+
+     async def start(self):
+         """Start all pending crawler tasks."""
+         if self._active_spiders:
+             logger.info(f"Starting {len(self._active_spiders)} crawler tasks, concurrency limit: {self.max_concurrency}")
+         await asyncio.gather(*self._active_spiders)
+
+     def _create_crawler(self, spider_cls) -> Crawler:
+         """Create a Crawler instance for the given spider class."""
+         if isinstance(spider_cls, str):
+             raise SpiderTypeError(f"{type(self)}.crawl args: String is not supported.")
+         crawler: Crawler = Crawler(spider_cls, self.settings)
+         return crawler
+
+     def _shutdown(self, _signum, _frame):
+         """Gracefully shut down all crawlers."""
+         logger.warning(f"Shutdown signal received, gracefully closing {len(self.crawlers)} crawlers...")
+         for crawler in self.crawlers:
+             if crawler.engine:
+                 crawler.engine.running = False
+                 crawler.engine.normal = False
+                 crawler.stats.close_spider(crawler.spider, 'shutdown signal')
+
+         # Wait for all tasks to finish
+         asyncio.create_task(self._wait_for_tasks())
+
+     async def _wait_for_tasks(self):
+         """Wait for all active tasks to finish."""
+         pending = [task for task in self._active_spiders if not task.done()]
+         if pending:
+             logger.info(f"Waiting for {len(pending)} active tasks to finish...")
+             await asyncio.gather(*pending)
+         logger.info("All crawlers have been closed gracefully")
+
+     @classmethod
+     def _get_default_settings(cls):
+         """Load the framework's default settings automatically."""
+         try:
+             return get_settings()
+         except ImportError:
+             return {}
+
+ # class CrawlerProcess:
+ #
+ #     def __init__(self, settings=None):
+ #         self.crawlers: Final[Set] = set()
+ #         self._active_spiders: Final[Set] = set()
+ #         self.settings = settings or self._get_default_settings()
+ #
+ #         signal.signal(signal.SIGINT, self._shutdown)
+ #
+ #     async def crawl(self, spider: Type[Spider]):
+ #         crawler: Crawler = self._create_crawler(spider)
+ #         self.crawlers.add(crawler)
+ #         task = await self._crawl(crawler)
+ #         self._active_spiders.add(task)
+ #
+ #     @classmethod
+ #     def _get_default_settings(cls):
+ #         """Load the default settings automatically."""
+ #         try:
+ #             return get_settings()
+ #         except ImportError:
+ #             return {}
+ #
+ #     @staticmethod
+ #     async def _crawl(crawler):
+ #         return asyncio.create_task(crawler.crawl())
+ #
+ #     async def start(self):
+ #         await asyncio.gather(*self._active_spiders)
+ #
+ #     def _create_crawler(self, spider_cls) -> Crawler:
+ #         if isinstance(spider_cls, str):
+ #             raise SpiderTypeError(f"{type(self)}.crawl args: String is not supported.")
+ #         crawler: Crawler = Crawler(spider_cls, self.settings)
+ #         return crawler
+ #
+ #     def _shutdown(self, _signum, _frame):
+ #         for crawler in self.crawlers:
+ #             crawler.engine.running = False
+ #             crawler.engine.normal = False
+ #             crawler.stats.close_spider(crawler.spider, 'Ctrl C')
+ #         logger.warning(f'spiders received: `Ctrl C` signal, closed.')
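A minimal usage sketch of the new CrawlerProcess, based only on the methods shown above; the spider classes and their import path are assumptions:

# Sketch: driving the new CrawlerProcess (FooSpider/BarSpider and their module are hypothetical).
import asyncio

from crawlo.crawler import CrawlerProcess
from myproject.spiders import BarSpider, FooSpider  # hypothetical spiders


async def main():
    # max_concurrency is optional; calculate_optimal_concurrency picks a value when it is omitted.
    process = CrawlerProcess(max_concurrency=8, batch_size=50)
    # Accepts a single Spider subclass or a list/tuple of them; passing a string raises SpiderTypeError.
    await process.crawl([FooSpider, BarSpider])


if __name__ == "__main__":
    asyncio.run(main())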
@@ -80,7 +80,8 @@ class RetryMiddleware(object):
  retry_times += 1
  self.logger.info(f"{spider} {request} {reason} retrying {retry_times} time...")
  request.meta['retry_times'] = retry_times
- request.dont_retry = True
+ # request.dont_retry = True
+ request.meta['dont_retry'] = True
  request.retry_priority = request.priority + self.retry_priority
  self.stats.inc_value("retry_count")
  return request
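This change matters because the new Request class below declares __slots__ without a dont_retry attribute, so assigning request.dont_retry would raise AttributeError; the flag now travels in request.meta. A hedged sketch of how a consumer of the flag would read it (that consumer is not part of this diff, and the helper name is hypothetical):

# Sketch: reading the relocated retry flag.
def should_skip_retry(request) -> bool:
    # Before 1.0.2 the flag lived on request.dont_retry; it is now stored in request.meta.
    return bool(request.meta.get('dont_retry', False))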
@@ -0,0 +1,234 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ import json
+ from copy import deepcopy
+ from urllib.parse import urlencode
+ from w3lib.url import safe_url_string
+ from typing import Dict, Optional, Callable, Union, Any, TypeVar, List
+
+ from crawlo.utils.url import escape_ajax
+
+ _Request = TypeVar("_Request", bound="Request")
+
+
+ class RequestPriority:
+     HIGH = -100
+     NORMAL = 0
+     LOW = 100
+
+
+ class Request:
+     """
+     Encapsulates an HTTP request object, representing a pending crawl task in the framework.
+     Supports callbacks, headers, request body, priority, metadata, and more.
+     """
+
+     __slots__ = (
+         '_url',
+         '_meta',
+         'callback',
+         'cb_kwargs',
+         'err_back',
+         'headers',
+         'body',
+         'method',
+         'cookies',
+         'priority',
+         'encoding',
+         'dont_filter',
+         'timeout',
+         'proxy',
+         'allow_redirects',
+         'auth',
+         'verify',
+         'flags'
+     )
+
+     def __init__(
+         self,
+         url: str,
+         callback: Optional[Callable] = None,
+         method: Optional[str] = 'GET',
+         headers: Optional[Dict[str, str]] = None,
+         body: Optional[Union[Dict, bytes, str]] = None,
+         form_data: Optional[Dict] = None,
+         json_body: Optional[Dict] = None,  # parameter renamed from json to json_body
+         cb_kwargs: Optional[Dict[str, Any]] = None,
+         err_back: Optional[Callable] = None,
+         cookies: Optional[Dict[str, str]] = None,
+         meta: Optional[Dict[str, Any]] = None,
+         priority: int = RequestPriority.NORMAL,
+         dont_filter: bool = False,
+         timeout: Optional[float] = None,
+         proxy: Optional[str] = None,
+         allow_redirects: bool = True,
+         auth: Optional[tuple] = None,
+         verify: bool = True,
+         flags: Optional[List[str]] = None,
+         encoding: str = 'utf-8'
+     ):
+         """
+         Initialize a request object.
+
+         Parameters:
+         :param url: request URL (required)
+         :param callback: callback invoked with the response (optional)
+         :param method: HTTP method, defaults to GET
+         :param headers: request headers (optional)
+         :param body: request body (dict, bytes, or str)
+         :param form_data: form data; switches the request to POST with an x-www-form-urlencoded body
+         :param json_body: builds a JSON request body and sets Content-Type to application/json
+         :param cb_kwargs: extra keyword arguments passed to the callback (optional)
+         :param err_back: error callback invoked when the request fails (optional)
+         :param cookies: request cookies (optional)
+         :param meta: metadata dict used to pass data between requests
+         :param priority: request priority; smaller values mean higher priority (default 0)
+         :param dont_filter: skip duplicate filtering (default False)
+         :param timeout: request timeout in seconds
+         :param proxy: proxy address (e.g. http://127.0.0.1:8080)
+         :param allow_redirects: whether to follow redirects (default True)
+         :param auth: authentication credentials as (username, password)
+         :param verify: whether to verify SSL certificates (default True)
+         :param flags: request flags (for debugging, retries, etc.)
+         """
+         self.callback = callback
+         self.method = str(method).upper()
+         self.headers = headers or {}
+         self.body = body
+         self.cb_kwargs = cb_kwargs or {}
+         self.err_back = err_back
+         self.cookies = cookies or {}
+         self.priority = -priority  # higher-priority requests get smaller values for sorting
+         self._meta = deepcopy(meta) if meta is not None else {}
+         self.timeout = self._meta.get('download_timeout', timeout)
+         self.proxy = proxy
+         self.allow_redirects = allow_redirects
+         self.auth = auth
+         self.verify = verify
+         self.flags = flags or []
+
+         # default encoding
+         self.encoding = encoding
+
+         # json_body takes precedence
+         if json_body is not None:
+             if 'Content-Type' not in self.headers:
+                 self.headers['Content-Type'] = 'application/json'
+             self.body = json.dumps(json_body, ensure_ascii=False).encode(self.encoding)
+             if self.method == 'GET':
+                 self.method = 'POST'
+
+         # then form_data
+         elif form_data is not None:
+             if self.method == 'GET':
+                 self.method = 'POST'
+             if 'Content-Type' not in self.headers:
+                 self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
+             self.body = urlencode(form_data)
+
+         # finally, handle a dict body
+         elif isinstance(self.body, dict):
+             if 'Content-Type' not in self.headers:
+                 self.headers['Content-Type'] = 'application/json'
+             self.body = json.dumps(self.body, ensure_ascii=False).encode(self.encoding)
+
+         self.dont_filter = dont_filter
+         self._set_url(url)
+
+     def copy(self: _Request) -> _Request:
+         """
+         Create a copy of this Request to avoid sharing mutable state.
+
+         :return: a new Request instance
+         """
+         return type(self)(
+             url=self.url,
+             callback=self.callback,
+             method=self.method,
+             headers=self.headers.copy(),
+             body=self.body,
+             form_data=None,  # form_data is not copied
+             json_body=None,  # json_body is not copied either
+             cb_kwargs=deepcopy(self.cb_kwargs),
+             err_back=self.err_back,
+             cookies=self.cookies.copy(),
+             meta=deepcopy(self._meta),
+             priority=-self.priority,
+             dont_filter=self.dont_filter,
+             timeout=self.timeout,
+             proxy=self.proxy,
+             allow_redirects=self.allow_redirects,
+             auth=self.auth,
+             verify=self.verify,
+             flags=self.flags.copy(),
+         )
+
+     def set_meta(self, key: str, value: Any) -> None:
+         """
+         Set a key/value pair in meta.
+
+         :param key: the key to set
+         :param value: the value to store
+         """
+         self._meta[key] = value
+
+     def _set_url(self, url: str) -> None:
+         """
+         Set and validate the URL, ensuring it is well formed and contains a scheme.
+
+         :param url: the raw URL string
+         :raises TypeError: if the value is not a string
+         :raises ValueError: if the URL has no scheme
+         """
+         if not isinstance(url, str):
+             raise TypeError(f"Request url must be a string, got {type(url).__name__}")
+
+         s = safe_url_string(url, self.encoding)
+         escaped_url = escape_ajax(s)
+         self._url = escaped_url
+
+         if not self._url.startswith(('http://', 'https://', 'about:', '')):
+             raise ValueError(f"Request URL is missing a scheme (e.g. http://): {self._url}")
+
+     @property
+     def url(self) -> str:
+         """
+         Return the request URL.
+
+         :return: the current URL string
+         """
+         return self._url
+
+     @property
+     def meta(self) -> Dict[str, Any]:
+         """
+         Return the request metadata.
+
+         :return: the metadata dict
+         """
+         return self._meta
+
+     def __str__(self) -> str:
+         """
+         Return a string representation for debugging and logging.
+
+         :return: the string <Request url=... method=...>
+         """
+         return f'<Request url={self.url} method={self.method}>'
+
+     def __repr__(self) -> str:
+         """
+         Return the official string representation.
+
+         :return: the same string as __str__
+         """
+         return str(self)
+
+     def __lt__(self, other: _Request) -> bool:
+         """
+         Compare two requests by priority, for sorting.
+
+         :param other: another Request object
+         :return: True if this request has higher priority (a smaller value)
+         """
+         return self.priority < other.priority
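A short sketch exercising the constructor behavior of the new Request class shown above; the example URLs and values are placeholders:

# Sketch: Request construction behavior from the code above (URLs are placeholders).
from crawlo.network.request import Request

# json_body builds a JSON body, sets the Content-Type, and upgrades GET to POST.
api = Request("https://example.com/api", json_body={"page": 1})
assert api.method == "POST"
assert api.headers["Content-Type"] == "application/json"
assert api.body == b'{"page": 1}'

# form_data produces an x-www-form-urlencoded string body and also upgrades to POST.
form = Request("https://example.com/login", form_data={"user": "alice", "pwd": "secret"})
assert form.method == "POST" and form.body == "user=alice&pwd=secret"

# meta is deep-copied at construction, exposed as a read-only property, and mutated via set_meta().
page = Request("https://example.com/list", meta={"depth": 1})
page.set_meta("referer", "https://example.com/")
clone = page.copy()
assert clone.meta == {"depth": 1, "referer": "https://example.com/"}
assert clone.meta is not page.meta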