crawlo 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crawlo might be problematic.
- crawlo/__init__.py +1 -0
- crawlo/__version__.py +1 -1
- crawlo/core/engine.py +9 -7
- crawlo/core/processor.py +1 -1
- crawlo/core/scheduler.py +32 -8
- crawlo/crawler.py +133 -18
- crawlo/downloader/playwright_downloader.py +161 -0
- crawlo/extension/log_stats.py +4 -4
- crawlo/filters/__init__.py +37 -0
- crawlo/filters/aioredis_filter.py +130 -0
- crawlo/filters/memory_filter.py +203 -0
- crawlo/filters/redis_filter.py +120 -0
- crawlo/items/__init__.py +40 -2
- crawlo/items/items.py +36 -5
- crawlo/middleware/retry.py +8 -2
- crawlo/network/request.py +215 -33
- crawlo/network/response.py +122 -53
- crawlo/pipelines/console_pipeline.py +28 -8
- crawlo/pipelines/mongo_pipeline.py +114 -2
- crawlo/pipelines/mysql_batch_pipline.py +134 -0
- crawlo/pipelines/mysql_pipeline.py +192 -2
- crawlo/pipelines/pipeline_manager.py +3 -3
- crawlo/settings/default_settings.py +51 -1
- crawlo/spider/__init__.py +2 -2
- crawlo/subscriber.py +90 -11
- crawlo/utils/concurrency_manager.py +125 -0
- crawlo/utils/date_tools.py +165 -8
- crawlo/utils/func_tools.py +74 -14
- crawlo/utils/pqueue.py +166 -8
- crawlo/utils/project.py +3 -2
- crawlo/utils/request.py +85 -0
- crawlo/utils/tools.py +303 -0
- crawlo/utils/url.py +40 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/METADATA +23 -11
- crawlo-1.0.2.dist-info/RECORD +68 -0
- crawlo-1.0.0.dist-info/RECORD +0 -59
- crawlo-1.0.0.dist-info/licenses/LICENSE +0 -23
- {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/WHEEL +0 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/entry_points.txt +0 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/top_level.txt +0 -0
crawlo/network/request.py
CHANGED

@@ -1,52 +1,234 @@
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json
from copy import deepcopy
from urllib.parse import urlencode
from w3lib.url import safe_url_string
from typing import Dict, Optional, Callable, Union, Any, TypeVar, List

from crawlo.utils.url import escape_ajax

_Request = TypeVar("_Request", bound="Request")


class RequestPriority:
    HIGH = -100
    NORMAL = 0
    LOW = 100


class Request:
    """
    Encapsulates an HTTP request, representing a single crawl task inside the framework.
    Supports callbacks, headers, body, priority, metadata and more.
    """

    __slots__ = (
        '_url',
        '_meta',
        'callback',
        'cb_kwargs',
        'err_back',
        'headers',
        'body',
        'method',
        'cookies',
        'priority',
        'encoding',
        'dont_filter',
        'timeout',
        'proxy',
        'allow_redirects',
        'auth',
        'verify',
        'flags'
    )

    def __init__(
        self,
        url: str,
        callback: Optional[Callable] = None,
        method: Optional[str] = 'GET',
        headers: Optional[Dict[str, str]] = None,
        body: Optional[Union[Dict, bytes, str]] = None,
        form_data: Optional[Dict] = None,
        json_body: Optional[Dict] = None,  # renamed from json to json_body
        cb_kwargs: Optional[Dict[str, Any]] = None,
        err_back: Optional[Callable] = None,
        cookies: Optional[Dict[str, str]] = None,
        meta: Optional[Dict[str, Any]] = None,
        priority: int = RequestPriority.NORMAL,
        dont_filter: bool = False,
        timeout: Optional[float] = None,
        proxy: Optional[str] = None,
        allow_redirects: bool = True,
        auth: Optional[tuple] = None,
        verify: bool = True,
        flags: Optional[List[str]] = None,
        encoding: str = 'utf-8'
    ):
        """
        Initialize the request object.

        Parameters:
        :param url: the URL to request (required)
        :param callback: callback used to handle the response (optional)
        :param method: HTTP method, defaults to GET
        :param headers: request headers (optional)
        :param body: request body (dict, bytes or str)
        :param form_data: form data; switches the method to POST and builds an x-www-form-urlencoded body
        :param json_body: builds a JSON request body and sets Content-Type to application/json
        :param cb_kwargs: extra keyword arguments passed to the callback (optional)
        :param err_back: error callback invoked when the request fails (optional)
        :param cookies: request cookies (optional)
        :param meta: metadata dict used to pass data between requests
        :param priority: request priority; smaller values mean higher priority (defaults to 0)
        :param dont_filter: skip the duplicate filter (defaults to False)
        :param timeout: request timeout in seconds
        :param proxy: proxy address (e.g. http://127.0.0.1:8080)
        :param allow_redirects: whether to follow redirects (defaults to True)
        :param auth: authentication info as (username, password)
        :param verify: whether to verify the SSL certificate (defaults to True)
        :param flags: request flags (for debugging, retries, etc.)
        """
        self.callback = callback
        self.method = str(method).upper()
        self.headers = headers or {}
        self.body = body
        self.cb_kwargs = cb_kwargs or {}
        self.err_back = err_back
        self.cookies = cookies or {}
        self.priority = -priority  # higher priority maps to a smaller value, which simplifies sorting
        self._meta = deepcopy(meta) if meta is not None else {}
        self.timeout = self._meta.get('download_timeout', timeout)
        self.proxy = proxy
        self.allow_redirects = allow_redirects
        self.auth = auth
        self.verify = verify
        self.flags = flags or []

        # default encoding
        self.encoding = encoding

        # json_body takes precedence
        if json_body is not None:
            if 'Content-Type' not in self.headers:
                self.headers['Content-Type'] = 'application/json'
            self.body = json.dumps(json_body, ensure_ascii=False).encode(self.encoding)
            if self.method == 'GET':
                self.method = 'POST'

        # then form_data
        elif form_data is not None:
            if self.method == 'GET':
                self.method = 'POST'
            if 'Content-Type' not in self.headers:
                self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
            self.body = urlencode(form_data)

        # finally, handle a dict body
        elif isinstance(self.body, dict):
            if 'Content-Type' not in self.headers:
                self.headers['Content-Type'] = 'application/json'
            self.body = json.dumps(self.body, ensure_ascii=False).encode(self.encoding)

        self.dont_filter = dont_filter
        self._set_url(url)

    def copy(self: _Request) -> _Request:
        """
        Create a copy of this Request so that mutable data is not shared.

        :return: a new Request instance
        """
        return type(self)(
            url=self.url,
            callback=self.callback,
            method=self.method,
            headers=self.headers.copy(),
            body=self.body,
            form_data=None,  # form_data is not copied
            json_body=None,  # json_body is not copied either
            cb_kwargs=deepcopy(self.cb_kwargs),
            err_back=self.err_back,
            cookies=self.cookies.copy(),
            meta=deepcopy(self._meta),
            priority=-self.priority,
            dont_filter=self.dont_filter,
            timeout=self.timeout,
            proxy=self.proxy,
            allow_redirects=self.allow_redirects,
            auth=self.auth,
            verify=self.verify,
            flags=self.flags.copy(),
        )

    def set_meta(self, key: str, value: Any) -> None:
        """
        Set a single key/value pair in meta.

        :param key: key to set
        :param value: value to store
        """
        self._meta[key] = value

    def _set_url(self, url: str) -> None:
        """
        Set and validate the URL, making sure it is well formed and carries a scheme.

        :param url: raw URL string
        :raises TypeError: if the value is not a string
        :raises ValueError: if the URL has no scheme
        """
        if not isinstance(url, str):
            raise TypeError(f"Request url must be a string, got {type(url).__name__}")

        s = safe_url_string(url, self.encoding)
        escaped_url = escape_ajax(s)
        self._url = escaped_url

        if not self._url.startswith(('http://', 'https://', 'about:', '')):
            raise ValueError(f"Request URL is missing a scheme (e.g. http://): {self._url}")

    @property
    def url(self) -> str:
        """
        Return the request URL.

        :return: the current URL string
        """
        return self._url

    @property
    def meta(self) -> Dict[str, Any]:
        """
        Return the request metadata.

        :return: the metadata dict
        """
        return self._meta

    def __str__(self) -> str:
        """
        Return a string representation for debugging and logging.

        :return: the string <Request url=... method=...>
        """
        return f'<Request url={self.url} method={self.method}>'

    def __repr__(self) -> str:
        """
        Return the official string representation.

        :return: the same string as __str__
        """
        return str(self)

    def __lt__(self, other: _Request) -> bool:
        """
        Compare two requests by priority for ordering.

        :param other: another Request object
        :return: True if this request has higher priority (a smaller value)
        """
        return self.priority < other.priority

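The reworked constructor covers the common POST cases directly. A minimal usage sketch (illustrative only; `parse_detail` is a hypothetical spider callback, not part of the package):

```python
from crawlo.network.request import Request, RequestPriority


def parse_detail(response):
    ...  # hypothetical callback


# form_data switches a GET request to POST and urlencodes the body.
login = Request(
    "https://example.com/login",
    callback=parse_detail,
    form_data={"user": "alice", "passwd": "secret"},
    priority=RequestPriority.HIGH,
    dont_filter=True,
)
assert login.method == "POST"
assert login.headers["Content-Type"] == "application/x-www-form-urlencoded"

# json_body serializes the payload and sets Content-Type: application/json.
api_call = Request(
    "https://example.com/api/items",
    json_body={"page": 1, "size": 20},
    timeout=10,
    meta={"page": 1},
)
assert api_call.method == "POST"
assert api_call.headers["Content-Type"] == "application/json"
```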
crawlo/network/response.py
CHANGED

@@ -2,8 +2,8 @@
# -*- coding:UTF-8 -*-
import re
import ujson
from typing import Dict, Any, List, Optional
from parsel import Selector, SelectorList
from http.cookies import SimpleCookie
from urllib.parse import urljoin as _urljoin

@@ -11,17 +11,20 @@ from crawlo import Request
from crawlo.exceptions import DecodeError


class Response:
    """
    Wraps an HTTP response and provides convenient data-parsing helpers.
    """

    def __init__(
        self,
        url: str,
        *,
        headers: Dict[str, Any],
        body: bytes = b"",
        method: str = 'GET',
        request: Request = None,
        status_code: int = 200,
    ):
        self.url = url
        self.headers = headers

@@ -29,65 +32,131 @@ class Response(object):
        self.method = method
        self.request = request
        self.status_code = status_code
        self.encoding = self.request.encoding if self.request else None
        self._text_cache = None
        self._selector_instance = None  # renamed so it does not clash with the _selector @property

    @property
    def text(self) -> str:
        """Decode the response body to a string using the correct encoding, caching the result."""
        if self._text_cache is not None:
            return self._text_cache

        encoding = self.encoding
        try:
            # prefer the encoding supplied by the request
            if encoding:
                self._text_cache = self.body.decode(encoding)
                return self._text_cache

            # extract the charset from the Content-Type header
            content_type = self.headers.get("Content-Type", "")
            charset_match = re.search(r"charset=([\w-]+)", content_type, re.I)
            if charset_match:
                encoding = charset_match.group(1)
                self._text_cache = self.body.decode(encoding)
                return self._text_cache

            # fall back to UTF-8 by default
            self._text_cache = self.body.decode("utf-8")
            return self._text_cache

        except UnicodeDecodeError as e:
            raise DecodeError(f"Failed to decode response from {self.url}: {e}")

    def json(self) -> Any:
        """Parse the response text as a JSON object."""
        return ujson.loads(self.text)

    def urljoin(self, url: str) -> str:
        """Join a URL against the response URL, resolving relative paths."""
        return _urljoin(self.url, url)

    @property
    def _selector(self) -> Selector:
        """Lazily build the Selector instance."""
        if self._selector_instance is None:
            self._selector_instance = Selector(self.text)
        return self._selector_instance

    def xpath(self, query: str) -> SelectorList:
        """Query the document with an XPath selector."""
        return self._selector.xpath(query)

    def css(self, query: str) -> SelectorList:
        """Query the document with a CSS selector."""
        return self._selector.css(query)

    def xpath_text(self, query: str) -> str:
        """Extract and return plain text via XPath."""
        fragments = self.xpath(f"{query}//text()").getall()
        return " ".join(text.strip() for text in fragments if text.strip())

    def css_text(self, query: str) -> str:
        """Extract and return plain text via a CSS selector."""
        fragments = self.css(f"{query} ::text").getall()
        return " ".join(text.strip() for text in fragments if text.strip())

    def get_text(self, xpath_or_css: str, join_str: str = " ") -> str:
        """
        Return the plain text of the matched node (descendant text is joined automatically).

        Args:
            xpath_or_css: XPath or CSS selector
            join_str: separator used to join the text fragments (defaults to a space)

        Returns:
            The joined plain-text string.
        """
        elements = self.xpath(xpath_or_css) if xpath_or_css.startswith(('/', '//', './')) else self.css(xpath_or_css)
        texts = elements.xpath('.//text()').getall()
        return join_str.join(text.strip() for text in texts if text.strip())

    def get_all_text(self, xpath_or_css: str, join_str: str = " ") -> List[str]:
        """
        Return the plain text of every matched node as a list.

        Args:
            xpath_or_css: XPath or CSS selector
            join_str: separator used to join text within a single node

        Returns:
            A list of plain-text strings, one entry per matched node.
        """
        elements = self.xpath(xpath_or_css) if xpath_or_css.startswith(('/', '//', './')) else self.css(xpath_or_css)
        result = []
        for element in elements:
            texts = element.xpath('.//text()').getall()
            clean_text = join_str.join(text.strip() for text in texts if text.strip())
            if clean_text:
                result.append(clean_text)
        return result

    def re_search(self, pattern: str, flags: int = re.DOTALL) -> Optional[re.Match]:
        """Run a regular-expression search over the response text."""
        if not isinstance(pattern, str):
            raise TypeError("Pattern must be a string")
        return re.search(pattern, self.text, flags=flags)

    def re_findall(self, pattern: str, flags: int = re.DOTALL) -> List[Any]:
        """Run a regular-expression findall over the response text."""
        if not isinstance(pattern, str):
            raise TypeError("Pattern must be a string")
        return re.findall(pattern, self.text, flags=flags)

    def get_cookies(self) -> Dict[str, str]:
        """Parse cookies out of the response headers."""
        cookie_header = self.headers.get("Set-Cookie", "")
        if isinstance(cookie_header, list):
            cookie_header = ", ".join(cookie_header)
        cookies = SimpleCookie()
        cookies.load(cookie_header)
        return {key: morsel.value for key, morsel in cookies.items()}

    @property
    def meta(self) -> Dict:
        """Return the meta dict of the associated Request."""
        return self.request.meta if self.request else {}

    def __str__(self):
        return f"<{self.status_code} {self.url}>"

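The new selector helpers can be exercised without a running crawler. A minimal sketch (the HTML and URL below are made up for illustration):

```python
from crawlo.network.response import Response

html = (
    b"<html><body>"
    b"<h1>Hello</h1>"
    b"<p class='intro'>First paragraph</p>"
    b"<p class='intro'>Second paragraph</p>"
    b"<a href='/next'>next</a>"
    b"</body></html>"
)
resp = Response(
    "https://example.com/page",
    headers={"Content-Type": "text/html; charset=utf-8"},
    body=html,
)

print(resp.css_text("h1"))            # -> "Hello"
print(resp.get_all_text("p.intro"))   # -> ["First paragraph", "Second paragraph"]
print(resp.xpath("//a/@href").get())  # -> "/next"
print(resp.urljoin("/next"))          # -> "https://example.com/next"
```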
crawlo/pipelines/console_pipeline.py
CHANGED

@@ -1,20 +1,40 @@
#!/usr/bin/python
# -*- coding:UTF-8 -*-
from typing import Dict, Any

from crawlo import Item
from crawlo.spider import Spider
from crawlo.utils.log import get_logger


class ConsolePipeline:
    """Pipeline that writes Item contents to the console."""

    def __init__(self, log_level: str = "DEBUG"):
        self.logger = get_logger(self.__class__.__name__, log_level)

    @classmethod
    def from_crawler(cls, crawler):
        """Create the pipeline from a crawler instance."""
        return cls(
            log_level=crawler.settings.get('LOG_LEVEL', 'DEBUG')
        )

    async def process_item(self, item: Item, spider: Spider) -> Item:
        """Process the Item and write it to the log."""
        try:
            item_dict = self._convert_to_serializable(item)
            self.logger.info(f"Item processed: {item_dict}")
            return item
        except Exception as e:
            self.logger.error(f"Error processing item: {e}", exc_info=True)
            raise

    @staticmethod
    def _convert_to_serializable(item: Item) -> Dict[str, Any]:
        """Convert the Item into a serializable dict."""
        try:
            return item.to_dict()
        except AttributeError:
            # fall back for Item implementations without a to_dict method
            return dict(item)

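A minimal sketch of driving the new pipeline by hand (assumptions: a plain dict subclass stands in for a crawlo Item, and the pipeline is called directly instead of by the framework):

```python
import asyncio

from crawlo.pipelines.console_pipeline import ConsolePipeline


class FakeItem(dict):
    """Stand-in for a crawlo Item; real Items also expose to_dict()."""


async def main():
    pipeline = ConsolePipeline(log_level="INFO")
    item = FakeItem(title="Hello", url="https://example.com")
    # Logs "Item processed: {'title': 'Hello', 'url': 'https://example.com'}"
    await pipeline.process_item(item, spider=None)


asyncio.run(main())
```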
crawlo/pipelines/mongo_pipeline.py
CHANGED

@@ -1,5 +1,117 @@
# -*- coding: utf-8 -*-
from typing import Optional
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo.errors import PyMongoError
from crawlo.utils.log import get_logger
from crawlo.exceptions import ItemDiscard


class MongoPipeline:
    def __init__(self, crawler):
        self.crawler = crawler
        self.settings = crawler.settings
        self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))

        # connection attributes, initialised lazily
        self.client = None
        self.db = None
        self.collection = None

        # configuration defaults
        self.mongo_uri = self.settings.get('MONGO_URI', 'mongodb://localhost:27017')
        self.db_name = self.settings.get('MONGO_DATABASE', 'scrapy_db')
        self.collection_name = self.settings.get('MONGO_COLLECTION', crawler.spider.name)

        # register the shutdown hook
        crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    async def _ensure_connection(self):
        """Make sure the connection has been established."""
        if self.client is None:
            self.client = AsyncIOMotorClient(self.mongo_uri)
            self.db = self.client[self.db_name]
            self.collection = self.db[self.collection_name]
            self.logger.info(f"MongoDB connection established (collection: {self.collection_name})")

    async def process_item(self, item, spider) -> Optional[dict]:
        """Core item-processing method."""
        try:
            await self._ensure_connection()

            item_dict = dict(item)
            result = await self.collection.insert_one(item_dict)

            # update stats counters
            self.crawler.stats.inc_value('mongodb/inserted')
            self.logger.debug(f"Inserted document id: {result.inserted_id}")

            return item

        except Exception as e:
            self.crawler.stats.inc_value('mongodb/failed')
            self.logger.error(f"MongoDB insert failed: {e}")
            raise ItemDiscard(f"MongoDB operation failed: {e}")

    async def spider_closed(self):
        """Clean up resources when the spider closes."""
        if self.client:
            self.client.close()
            self.logger.info("MongoDB connection closed")


class MongoPoolPipeline:
    def __init__(self, crawler):
        self.crawler = crawler
        self.settings = crawler.settings
        self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))

        # connection pool configuration
        self.client = AsyncIOMotorClient(
            self.settings.get('MONGO_URI', 'mongodb://localhost:27017'),
            maxPoolSize=self.settings.getint('MONGO_MAX_POOL_SIZE', 100),
            minPoolSize=self.settings.getint('MONGO_MIN_POOL_SIZE', 10),
            connectTimeoutMS=5000,
            socketTimeoutMS=30000
        )

        self.db = self.client[self.settings.get('MONGO_DATABASE', 'scrapy_db')]
        self.collection = self.db[self.settings.get('MONGO_COLLECTION', crawler.spider.name)]

        crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
        self.logger.info(f"MongoDB connection pool initialised (collection: {self.collection.name})")

    @classmethod
    def create_instance(cls, crawler):
        return cls(crawler)

    async def process_item(self, item, spider) -> Optional[dict]:
        """Process an item (with a retry mechanism)."""
        try:
            item_dict = dict(item)

            # insert with retries
            for attempt in range(3):
                try:
                    result = await self.collection.insert_one(item_dict)
                    self.crawler.stats.inc_value('mongodb/insert_success')
                    self.logger.debug(f"Insert succeeded [attempt {attempt + 1}]: {result.inserted_id}")
                    return item
                except PyMongoError as e:
                    if attempt == 2:  # still failing on the final attempt
                        raise
                    self.logger.warning(f"Retrying insert [attempt {attempt + 1}]: {e}")

        except Exception as e:
            self.crawler.stats.inc_value('mongodb/insert_failed')
            self.logger.error(f"MongoDB operation ultimately failed: {e}")
            raise ItemDiscard(f"MongoDB operation failed: {e}")

    async def spider_closed(self):
        """Resource cleanup."""
        if hasattr(self, 'client'):
            self.client.close()
            self.logger.info("MongoDB connection pool released")