crawlo 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

crawlo/__version__.py CHANGED
@@ -1,2 +1,2 @@
 
- __version__ = "1.0.1"
+ __version__ = "1.0.2"
crawlo/crawler.py CHANGED
@@ -6,15 +6,15 @@ from typing import Type, Final, Set, Optional
 
  from crawlo.spider import Spider
  from crawlo.core.engine import Engine
- from crawlo.subscriber import Subscriber
-
  from crawlo.utils.log import get_logger
+ from crawlo.subscriber import Subscriber
  from crawlo.extension import ExtensionManager
  from crawlo.exceptions import SpiderTypeError
- from crawlo.utils.project import merge_settings
  from crawlo.stats_collector import StatsCollector
  from crawlo.event import spider_opened, spider_closed
  from crawlo.settings.setting_manager import SettingManager
+ from crawlo.utils.project import merge_settings, get_settings
+ from crawlo.utils.concurrency_manager import calculate_optimal_concurrency
 
  logger = get_logger(__name__)
 
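Note on the new import: concurrency sizing now lives in `crawlo.utils.concurrency_manager`, which is not included in this diff. As a rough idea of what `calculate_optimal_concurrency` might compute, here is a hedged sketch; the CPU-count heuristic and the `floor`/`ceiling` bounds are assumptions, not crawlo's actual implementation.

```python
# Hypothetical sketch of calculate_optimal_concurrency. The real
# crawlo.utils.concurrency_manager module is not shown in this diff, so the
# CPU-count heuristic and the floor/ceiling bounds below are assumptions.
import os
from typing import Optional


def calculate_optimal_concurrency(max_concurrency: Optional[int] = None,
                                  floor: int = 4, ceiling: int = 64) -> int:
    """Use an explicit limit if one is given, otherwise derive one from CPU count."""
    if max_concurrency is not None and max_concurrency > 0:
        return max_concurrency
    cpu_count = os.cpu_count() or 1
    # Crawling is I/O bound, so allow several tasks per core, within bounds.
    return max(floor, min(ceiling, cpu_count * 8))
```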
@@ -72,36 +72,151 @@ class Crawler:
 
 
  class CrawlerProcess:
+     """Crawler process manager supporting cross-platform dynamic concurrency control and fine-grained logging."""
 
-     def __init__(self, settings=None):
+     def __init__(self, settings=None, max_concurrency: Optional[int] = None, batch_size: int = 50):
          self.crawlers: Final[Set] = set()
          self._active_spiders: Final[Set] = set()
-         self.settings = settings
+         self.settings = settings or self._get_default_settings()
+         self.batch_size = batch_size
+
+         # Use the dedicated module to compute the maximum concurrency
+         self.max_concurrency = calculate_optimal_concurrency(max_concurrency)
+         self.semaphore = asyncio.Semaphore(self.max_concurrency)
 
          signal.signal(signal.SIGINT, self._shutdown)
+         logger.info(f"Initialized crawler process, max concurrency: {self.max_concurrency}")
 
-     async def crawl(self, spider: Type[Spider]):
-         crawler: Crawler = self._create_crawler(spider)
-         self.crawlers.add(crawler)
-         task = await self._crawl(crawler)
-         self._active_spiders.add(task)
+     async def crawl(self, spiders):
+         """Accept a single spider class or a batch of them, with tidier log output."""
+         if not spiders:
+             raise ValueError("At least one spider class must be provided")
 
-     @staticmethod
-     async def _crawl(crawler):
-         return asyncio.create_task(crawler.crawl())
+         # Normalize to a list
+         if isinstance(spiders, type) and issubclass(spiders, Spider):
+             spiders = [spiders]
+         elif isinstance(spiders, (list, tuple)):
+             spiders = list(spiders)
+         else:
+             raise TypeError("spiders must be a spider class or a list/tuple of spider classes")
+
+         # Sort by spider class name (ascending, case-insensitive)
+         spiders.sort(key=lambda x: x.__name__.lower())
+
+         if len(spiders) == 1:
+             logger.info(f"Starting spider: {spiders[0].__name__}")
+         else:
+             logger.info(f"Starting {len(spiders)} spiders, sorted by name and processed in batches")
+
+         batches = [spiders[i:i + self.batch_size] for i in range(0, len(spiders), self.batch_size)]
+
+         for batch_idx, batch in enumerate(batches):
+             batch_tasks = set()
+
+             for spider_cls in batch:
+                 crawler = self._create_crawler(spider_cls)
+                 self.crawlers.add(crawler)
+
+                 await self.semaphore.acquire()
+                 task = asyncio.create_task(self._run_crawler_with_semaphore(crawler))
+                 batch_tasks.add(task)
+                 self._active_spiders.add(task)
+
+             if len(spiders) > 1:  # Only log batch info when running multiple spiders
+                 logger.info(f"Starting batch {batch_idx + 1}/{len(batches)} with {len(batch)} spiders")
+
+             await asyncio.gather(*batch_tasks)
+
+             if len(spiders) > 1:  # Only log batch completion when running multiple spiders
+                 logger.info(f"Batch {batch_idx + 1} finished")
+
+     async def _run_crawler_with_semaphore(self, crawler):
+         """Run a crawler under semaphore control."""
+         try:
+             await crawler.crawl()
+         finally:
+             self.semaphore.release()  # Always release the slot
 
      async def start(self):
-         await asyncio.gather(*self._active_spiders)
+         """Start all crawler tasks."""
+         if self._active_spiders:
+             logger.info(f"Starting {len(self._active_spiders)} crawler tasks, concurrency limit: {self.max_concurrency}")
+             await asyncio.gather(*self._active_spiders)
 
      def _create_crawler(self, spider_cls) -> Crawler:
+         """Create a crawler instance."""
          if isinstance(spider_cls, str):
              raise SpiderTypeError(f"{type(self)}.crawl args: String is not supported.")
          crawler: Crawler = Crawler(spider_cls, self.settings)
          return crawler
 
      def _shutdown(self, _signum, _frame):
+         """Gracefully shut down all crawlers."""
+         logger.warning(f"Received shutdown signal, gracefully closing {len(self.crawlers)} crawlers...")
          for crawler in self.crawlers:
-             crawler.engine.running = False
-             crawler.engine.normal = False
-             crawler.stats.close_spider(crawler.spider, 'Ctrl C')
-         logger.warning(f'spiders received: `Ctrl C` signal, closed.')
+             if crawler.engine:
+                 crawler.engine.running = False
+                 crawler.engine.normal = False
+             crawler.stats.close_spider(crawler.spider, 'shutdown signal')
+
+         # Wait for all tasks to finish
+         asyncio.create_task(self._wait_for_tasks())
+
+     async def _wait_for_tasks(self):
+         """Wait for all active tasks to complete."""
+         pending = [task for task in self._active_spiders if not task.done()]
+         if pending:
+             logger.info(f"Waiting for {len(pending)} active tasks to finish...")
+             await asyncio.gather(*pending)
+         logger.info("All crawlers have shut down gracefully")
+
+     @classmethod
+     def _get_default_settings(cls):
+         """Let the framework fetch default settings automatically."""
+         try:
+             return get_settings()
+         except ImportError:
+             return {}
+
+ # class CrawlerProcess:
+ #
+ #     def __init__(self, settings=None):
+ #         self.crawlers: Final[Set] = set()
+ #         self._active_spiders: Final[Set] = set()
+ #         self.settings = settings or self._get_default_settings()
+ #
+ #         signal.signal(signal.SIGINT, self._shutdown)
+ #
+ #     async def crawl(self, spider: Type[Spider]):
+ #         crawler: Crawler = self._create_crawler(spider)
+ #         self.crawlers.add(crawler)
+ #         task = await self._crawl(crawler)
+ #         self._active_spiders.add(task)
+ #
+ #     @classmethod
+ #     def _get_default_settings(cls):
+ #         """Fetch default settings automatically"""
+ #         try:
+ #             return get_settings()
+ #         except ImportError:
+ #             return {}
+ #
+ #     @staticmethod
+ #     async def _crawl(crawler):
+ #         return asyncio.create_task(crawler.crawl())
+ #
+ #     async def start(self):
+ #         await asyncio.gather(*self._active_spiders)
+ #
+ #     def _create_crawler(self, spider_cls) -> Crawler:
+ #         if isinstance(spider_cls, str):
+ #             raise SpiderTypeError(f"{type(self)}.crawl args: String is not supported.")
+ #         crawler: Crawler = Crawler(spider_cls, self.settings)
+ #         return crawler
+ #
+ #     def _shutdown(self, _signum, _frame):
+ #         for crawler in self.crawlers:
+ #             crawler.engine.running = False
+ #             crawler.engine.normal = False
+ #             crawler.stats.close_spider(crawler.spider, 'Ctrl C')
+ #         logger.warning(f'spiders received: `Ctrl C` signal, closed.')
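The rewritten `CrawlerProcess.crawl()` now accepts a single spider class or a list/tuple of them, sorts them by class name, and runs them in semaphore-limited batches of `batch_size`. A minimal usage sketch of the new API, assuming the spider classes themselves are defined elsewhere (the names below are placeholders):

```python
# Minimal usage sketch of the new batch API. The spider classes are placeholders
# (any Spider subclasses from your project); CrawlerProcess and its parameters
# are taken from the diff above.
import asyncio

from crawlo.crawler import CrawlerProcess


async def run(spider_classes):
    # spider_classes: a Spider subclass or a list/tuple of them (strings are rejected)
    process = CrawlerProcess(max_concurrency=8, batch_size=50)
    await process.crawl(spider_classes)   # sorts by class name, runs in batches
    await process.start()                 # final gather over any still-active tasks


# Example call (NewsSpider and ShopSpider are hypothetical):
# asyncio.run(run([NewsSpider, ShopSpider]))
```

Note that in this version `crawl()` already awaits each batch, so `start()` mainly serves as a final gather over `_active_spiders`.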
@@ -80,7 +80,8 @@ class RetryMiddleware(object):
          retry_times += 1
          self.logger.info(f"{spider} {request} {reason} retrying {retry_times} time...")
          request.meta['retry_times'] = retry_times
-         request.dont_retry = True
+         # request.dont_retry = True
+         request.meta['dont_retry'] = True
          request.retry_priority = request.priority + self.retry_priority
          self.stats.inc_value("retry_count")
          return request
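RetryMiddleware now records the retry marker in `request.meta['dont_retry']` instead of setting a `dont_retry` attribute on the request, consistent with the new `__slots__`-based Request further below. The consumer of that flag is not shown in this diff; the sketch that follows only illustrates how downstream code might read the relocated flag.

```python
# Hedged sketch: how code downstream of RetryMiddleware might consult the new
# meta flag. The actual consumer is not part of this diff, so treat this purely
# as an illustration of the attribute -> meta move, not crawlo's real logic.
def should_retry(request, max_retry_times: int = 3) -> bool:
    if request.meta.get('dont_retry', False):       # flag now lives in meta
        return False
    return request.meta.get('retry_times', 0) < max_retry_times
```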
crawlo/network/request.py CHANGED
@@ -1,155 +1,234 @@
  #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- import hashlib
+ # -*- coding: UTF-8 -*-
+ import json
  from copy import deepcopy
+ from urllib.parse import urlencode
  from w3lib.url import safe_url_string
- from typing import Dict, Optional, Callable, Union, Any
+ from typing import Dict, Optional, Callable, Union, Any, TypeVar, List
 
  from crawlo.utils.url import escape_ajax
 
-
- class Request(object):
+ _Request = TypeVar("_Request", bound="Request")
+
+
+ class RequestPriority:
+     HIGH = -100
+     NORMAL = 0
+     LOW = 100
+
+
+ class Request:
+     """
+     Encapsulates an HTTP request object, representing a pending crawl task in the crawler framework.
+     Supports callbacks, request headers, request body, priority, metadata, and more.
+     """
+
+     __slots__ = (
+         '_url',
+         '_meta',
+         'callback',
+         'cb_kwargs',
+         'err_back',
+         'headers',
+         'body',
+         'method',
+         'cookies',
+         'priority',
+         'encoding',
+         'dont_filter',
+         'timeout',
+         'proxy',
+         'allow_redirects',
+         'auth',
+         'verify',
+         'flags'
+     )
 
      def __init__(
-             self,
-             url: str,
-             *,
-             callback: Optional[Callable] = None,
-             headers: Optional[Dict[str, str]] = None,
-             body: Optional[Union[Dict, bytes, str]] = None,
-             method: Optional[str] = 'GET',
-             cookies: Optional[Dict[str, str]] = None,
-             priority: int = 0,
-             encoding: Optional[str] = 'utf-8',
-             meta: Optional[Dict[str, Any]] = None,
-             dont_filter: bool = False,
-             timeout: Optional[float] = None,
-             proxy: Optional[str] = None,
-             allow_redirects: bool = True,
-             auth: Optional[tuple] = None,
-             verify: bool = True
+             self,
+             url: str,
+             callback: Optional[Callable] = None,
+             method: Optional[str] = 'GET',
+             headers: Optional[Dict[str, str]] = None,
+             body: Optional[Union[Dict, bytes, str]] = None,
+             form_data: Optional[Dict] = None,
+             json_body: Optional[Dict] = None,  # ✅ parameter renamed from json to json_body
+             cb_kwargs: Optional[Dict[str, Any]] = None,
+             err_back: Optional[Callable] = None,
+             cookies: Optional[Dict[str, str]] = None,
+             meta: Optional[Dict[str, Any]] = None,
+             priority: int = RequestPriority.NORMAL,
+             dont_filter: bool = False,
+             timeout: Optional[float] = None,
+             proxy: Optional[str] = None,
+             allow_redirects: bool = True,
+             auth: Optional[tuple] = None,
+             verify: bool = True,
+             flags: Optional[List[str]] = None,
+             encoding: str = 'utf-8'
      ):
-         # Initialize the basic attributes first
+         """
+         Initialize the request object.
+
+         Parameters:
+         :param url: URL to request (required)
+         :param callback: callback for handling the response (optional)
+         :param method: HTTP request method, defaults to GET
+         :param headers: request headers (optional)
+         :param body: request body (dict, bytes, or str)
+         :param form_data: form data; switches the method to POST and builds an x-www-form-urlencoded body
+         :param json_body: builds a JSON request body and sets Content-Type to application/json
+         :param cb_kwargs: extra keyword arguments passed to the callback (optional)
+         :param err_back: error callback invoked when the request fails (optional)
+         :param cookies: request cookies (optional)
+         :param meta: metadata dict used to pass data between requests
+         :param priority: request priority; smaller values mean higher priority (defaults to 0)
+         :param dont_filter: whether to skip deduplication (defaults to False)
+         :param timeout: request timeout in seconds
+         :param proxy: proxy address (e.g. http://127.0.0.1:8080)
+         :param allow_redirects: whether redirects are followed (defaults to True)
+         :param auth: authentication info as (username, password)
+         :param verify: whether to verify SSL certificates (defaults to True)
+         :param flags: request flags (for debugging, retries, etc.)
+         """
          self.callback = callback
-         self.headers = headers if headers else {}
-         self.body = body
          self.method = str(method).upper()
-         self.cookies = cookies if cookies else {}
-         self.priority = -priority
-         self.encoding = encoding
-         self.dont_filter = dont_filter
-         self._meta = meta if meta is not None else {}
-         self.timeout = timeout
+         self.headers = headers or {}
+         self.body = body
+         self.cb_kwargs = cb_kwargs or {}
+         self.err_back = err_back
+         self.cookies = cookies or {}
+         self.priority = -priority  # higher priority means a smaller value, convenient for sorting
+         self._meta = deepcopy(meta) if meta is not None else {}
+         self.timeout = self._meta.get('download_timeout', timeout)
          self.proxy = proxy
          self.allow_redirects = allow_redirects
          self.auth = auth
          self.verify = verify
+         self.flags = flags or []
 
-         # Handle the URL last, so dependent attributes such as encoding are already initialized
+         # Default encoding
+         self.encoding = encoding
+
+         # json_body takes precedence
+         if json_body is not None:
+             if 'Content-Type' not in self.headers:
+                 self.headers['Content-Type'] = 'application/json'
+             self.body = json.dumps(json_body, ensure_ascii=False).encode(self.encoding)
+             if self.method == 'GET':
+                 self.method = 'POST'
+
+         # form_data comes next
+         elif form_data is not None:
+             if self.method == 'GET':
+                 self.method = 'POST'
+             if 'Content-Type' not in self.headers:
+                 self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
+             self.body = urlencode(form_data)
+
+         # Finally, handle the case where body is a dict
+         elif isinstance(self.body, dict):
+             if 'Content-Type' not in self.headers:
+                 self.headers['Content-Type'] = 'application/json'
+             self.body = json.dumps(self.body, ensure_ascii=False).encode(self.encoding)
+
+         self.dont_filter = dont_filter
          self._set_url(url)
 
-     def copy(self):
-         return deepcopy(self)
+     def copy(self: _Request) -> _Request:
+         """
+         Create a copy of this Request to avoid sharing referenced data.
+
+         :return: a new Request instance
+         """
+         return type(self)(
+             url=self.url,
+             callback=self.callback,
+             method=self.method,
+             headers=self.headers.copy(),
+             body=self.body,
+             form_data=None,  # form_data is not copied
+             json_body=None,  # json_body is not copied either
+             cb_kwargs=deepcopy(self.cb_kwargs),
+             err_back=self.err_back,
+             cookies=self.cookies.copy(),
+             meta=deepcopy(self._meta),
+             priority=-self.priority,
+             dont_filter=self.dont_filter,
+             timeout=self.timeout,
+             proxy=self.proxy,
+             allow_redirects=self.allow_redirects,
+             auth=self.auth,
+             verify=self.verify,
+             flags=self.flags.copy(),
+         )
 
      def set_meta(self, key: str, value: Any) -> None:
+         """
+         Set a key/value pair in meta.
+
+         :param key: key to set
+         :param value: corresponding value
+         """
          self._meta[key] = value
 
      def _set_url(self, url: str) -> None:
+         """
+         Set and validate the URL, making sure it is well-formed and includes a scheme.
+
+         :param url: raw URL string
+         :raises TypeError: if the value passed in is not a string
+         :raises ValueError: if the URL has no scheme
+         """
          if not isinstance(url, str):
-             raise TypeError(f"Request url must be str, got {type(url).__name__}")
+             raise TypeError(f"Request url must be a string, got {type(url).__name__}")
 
          s = safe_url_string(url, self.encoding)
-         self._url = escape_ajax(s)
+         escaped_url = escape_ajax(s)
+         self._url = escaped_url
 
-         if (
-                 "://" not in self._url
-                 and not self._url.startswith("about:")
-                 and not self._url.startswith("data:")
-         ):
-             raise ValueError(f"Missing scheme in request url: {self._url}")
+         if not self._url.startswith(('http://', 'https://', 'about:', '')):
+             raise ValueError(f"Request URL is missing a scheme (e.g. http://): {self._url}")
 
      @property
      def url(self) -> str:
+         """
+         Get the request URL.
+
+         :return: the URL string of this request
+         """
          return self._url
 
      @property
      def meta(self) -> Dict[str, Any]:
+         """
+         Get the request metadata.
+
+         :return: the metadata dict
+         """
          return self._meta
 
      def __str__(self) -> str:
+         """
+         Return the string representation, used for debugging and log output.
+
+         :return: a string of the form <Request url=... method=...>
+         """
          return f'<Request url={self.url} method={self.method}>'
 
      def __repr__(self) -> str:
-         return self.__str__()
-
-     def __lt__(self, other) -> bool:
-         return self.priority < other.priority
-
-
- # #!/usr/bin/python
- # # -*- coding:UTF-8 -*-
- # import hashlib
- # from copy import deepcopy
- # from w3lib.url import safe_url_string
- # from typing import Dict, Optional, Callable, Union
- #
- # from crawlo.utils.url import escape_ajax
- #
- #
- # class Request(object):
- #
- #     def __init__(
- #             self,
- #             url: str,
- #             *,
- #             callback: Optional[Callable] = None,
- #             headers: Optional[Dict[str, str]] = None,
- #             body: Optional[Dict] = None,
- #             method: Optional[str] = 'GET',
- #             cookies: Optional[Dict[str, str]] = None,
- #             priority: int = 0,
- #             encoding: Optional[str] = 'UTF-8',
- #             meta: Optional[Dict[str, str]] = None,
- #             dont_filter: bool = False
- #
- #     ):
- #         self.url = url
- #         self.callback = callback
- #         self.headers = headers if headers else {}
- #         self.body = body
- #         self.method = str(method).upper()
- #         self.cookies = cookies
- #         self.priority = -priority
- #         self.encoding = encoding
- #         self.dont_filter = dont_filter
- #         self._meta = meta if meta is not None else {}
- #
- #     def copy(self):
- #         return deepcopy(self)
- #
- #     def set_meta(self, key: str, value: str):
- #         self._meta[key] = value
- #
- #     def _set_url(self, url: str) -> None:
- #         if not isinstance(url, str):
- #             raise TypeError(f"Request url must be str, got {type(url).__name__}")
- #
- #         s = safe_url_string(url, self.encoding)
- #         self._url = escape_ajax(s)
- #
- #         if (
- #                 "://" not in self._url
- #                 and not self._url.startswith("about:")
- #                 and not self._url.startswith("data:")
- #         ):
- #             raise ValueError(f"Missing scheme in request url: {self._url}")
- #
- #     @property
- #     def meta(self):
- #         return self._meta
- #
- #     def __str__(self):
- #         return f'<Request url={self.url}> method={self.method}>'
- #
- #     def __lt__(self, other):
- #         return self.priority < other.priority
+         """
+         Return the official string representation of the object.
+
+         :return: a string, same as __str__
+         """
+         return str(self)
+
+     def __lt__(self, other: _Request) -> bool:
+         """
+         Compare the priority of two requests, used for ordering.
+
+         :param other: another Request object
+         :return: True if this request has higher priority (a smaller value)
+         """
+         return self.priority < other.priority
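The reworked `Request` adds `form_data` and `json_body` convenience parameters (which set the method, `Content-Type`, and body automatically), plus `cb_kwargs`, `err_back`, `flags`, and the `RequestPriority` constants. A brief usage sketch based on the constructor shown above; the URLs and field values are illustrative only.

```python
# Usage sketch based on the constructor shown in this diff; the URLs and field
# values are illustrative only.
from crawlo.network.request import Request, RequestPriority

# json_body serializes the dict, sets Content-Type: application/json,
# and upgrades the default GET to POST.
api_req = Request(
    "https://example.com/api/items",
    json_body={"page": 1, "size": 50},
    priority=RequestPriority.HIGH,
    cb_kwargs={"category": "books"},   # forwarded to the callback
)

# form_data is urlencoded and Content-Type becomes
# application/x-www-form-urlencoded.
login_req = Request(
    "https://example.com/login",
    form_data={"user": "alice", "password": "secret"},
    dont_filter=True,                  # skip deduplication for this request
)

print(api_req)   # <Request url=https://example.com/api/items method=POST>
```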