crawlo-1.0.1-py3-none-any.whl → crawlo-1.0.3-py3-none-any.whl
This diff shows the changes between the two package versions as published to their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__init__.py +9 -6
- crawlo/__version__.py +1 -2
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +158 -158
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +57 -59
- crawlo/crawler.py +242 -107
- crawlo/downloader/__init__.py +78 -78
- crawlo/downloader/aiohttp_downloader.py +259 -96
- crawlo/downloader/httpx_downloader.py +187 -48
- crawlo/downloader/playwright_downloader.py +160 -160
- crawlo/event.py +11 -11
- crawlo/exceptions.py +64 -64
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/filters/__init__.py +37 -37
- crawlo/filters/aioredis_filter.py +157 -129
- crawlo/filters/memory_filter.py +202 -203
- crawlo/filters/redis_filter.py +119 -119
- crawlo/items/__init__.py +62 -62
- crawlo/items/items.py +118 -118
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +140 -140
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +90 -89
- crawlo/network/__init__.py +7 -7
- crawlo/network/request.py +205 -155
- crawlo/network/response.py +166 -93
- crawlo/pipelines/__init__.py +13 -13
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_batch_pipline.py +133 -133
- crawlo/pipelines/mysql_pipeline.py +195 -176
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +93 -89
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +36 -36
- crawlo/stats_collector.py +59 -47
- crawlo/subscriber.py +106 -27
- crawlo/task_manager.py +27 -27
- crawlo/templates/item_template.tmpl +21 -21
- crawlo/templates/project_template/main.py +32 -32
- crawlo/templates/project_template/setting.py +189 -189
- crawlo/templates/spider_template.tmpl +30 -30
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/concurrency_manager.py +125 -0
- crawlo/utils/date_tools.py +177 -177
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/log.py +39 -39
- crawlo/utils/pqueue.py +173 -173
- crawlo/utils/project.py +59 -59
- crawlo/utils/request.py +122 -85
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +303 -0
- crawlo/utils/url.py +39 -39
- {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/METADATA +48 -36
- crawlo-1.0.3.dist-info/RECORD +80 -0
- {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/top_level.txt +1 -0
- tests/__init__.py +7 -0
- tests/baidu_spider/__init__.py +7 -0
- tests/baidu_spider/demo.py +94 -0
- tests/baidu_spider/items.py +25 -0
- tests/baidu_spider/middleware.py +49 -0
- tests/baidu_spider/pipeline.py +55 -0
- tests/baidu_spider/request_fingerprints.txt +9 -0
- tests/baidu_spider/run.py +27 -0
- tests/baidu_spider/settings.py +78 -0
- tests/baidu_spider/spiders/__init__.py +7 -0
- tests/baidu_spider/spiders/bai_du.py +61 -0
- tests/baidu_spider/spiders/sina.py +79 -0
- crawlo-1.0.1.dist-info/RECORD +0 -67
- crawlo-1.0.1.dist-info/licenses/LICENSE +0 -23
- {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/WHEEL +0 -0
- {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/entry_points.txt +0 -0
crawlo/network/request.py
CHANGED
@@ -1,155 +1,205 @@
- (155 lines removed: the previous Request implementation. Only the shebang line, the coding declaration, and "from copy import deepcopy" survived extraction; the rest of the deleted side is not recoverable.)
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+import json
+from copy import deepcopy
+from urllib.parse import urlencode
+from w3lib.url import safe_url_string
+from typing import Dict, Optional, Callable, Union, Any, TypeVar, List
+
+from crawlo.utils.url import escape_ajax
+
+
+_Request = TypeVar("_Request", bound="Request")
+
+
+class RequestPriority:
+    """Request priority constants."""
+    HIGH = -100
+    NORMAL = 0
+    LOW = 100
+
+
+class Request:
+    """
+    Encapsulates an HTTP request, representing one pending crawl task in the framework.
+    Supports JSON, form, and raw-body submission, setting Content-Type and encoding automatically.
+    File upload (multipart/form-data) is not supported, to keep the class lightweight.
+    """
+
+    __slots__ = (
+        '_url',
+        '_meta',
+        'callback',
+        'cb_kwargs',
+        'err_back',
+        'headers',
+        'body',
+        'method',
+        'cookies',
+        'priority',
+        'encoding',
+        'dont_filter',
+        'timeout',
+        'proxy',
+        'allow_redirects',
+        'auth',
+        'verify',
+        'flags',
+        # high-level parameters kept so copy() can rebuild the body
+        '_json_body',
+        '_form_data'
+    )
+
+    def __init__(
+        self,
+        url: str,
+        callback: Optional[Callable] = None,
+        method: Optional[str] = 'GET',
+        headers: Optional[Dict[str, str]] = None,
+        body: Optional[Union[bytes, str, Dict[Any, Any]]] = None,
+        form_data: Optional[Dict[Any, Any]] = None,
+        json_body: Optional[Dict[Any, Any]] = None,
+        cb_kwargs: Optional[Dict[str, Any]] = None,
+        err_back: Optional[Callable] = None,
+        cookies: Optional[Dict[str, str]] = None,
+        meta: Optional[Dict[str, Any]] = None,
+        priority: int = RequestPriority.NORMAL,
+        dont_filter: bool = False,
+        timeout: Optional[float] = None,
+        proxy: Optional[str] = None,
+        allow_redirects: bool = True,
+        auth: Optional[tuple] = None,
+        verify: bool = True,
+        flags: Optional[List[str]] = None,
+        encoding: str = 'utf-8'
+    ):
+        """
+        Initialize a request object.
+
+        :param url: request URL (required)
+        :param callback: callback invoked on success
+        :param method: HTTP method, GET by default
+        :param headers: request headers
+        :param body: raw request body (bytes/str); a dict is converted to JSON when json_body/form_data are not used
+        :param form_data: form data, encoded as application/x-www-form-urlencoded
+        :param json_body: JSON data, serialized automatically with the matching Content-Type
+        :param cb_kwargs: extra keyword arguments passed to the callback
+        :param err_back: callback invoked when the request fails
+        :param cookies: cookie dict
+        :param meta: metadata passed across middlewares
+        :param priority: priority (lower values are scheduled first)
+        :param dont_filter: skip request de-duplication
+        :param timeout: timeout in seconds
+        :param proxy: proxy address, e.g. http://127.0.0.1:8080
+        :param allow_redirects: whether to follow redirects
+        :param auth: auth tuple (username, password)
+        :param verify: whether to verify SSL certificates
+        :param flags: flags used for debugging or classification
+        :param encoding: character encoding, utf-8 by default
+        """
+        self.callback = callback
+        self.err_back = err_back
+        self.method = str(method).upper()
+        self.headers = headers or {}
+        self.cookies = cookies or {}
+        self.priority = -priority  # negated so that a smaller input value sorts first
+        self._meta = deepcopy(meta) if meta is not None else {}
+        self.timeout = self._meta.get('download_timeout', timeout)
+        self.proxy = proxy
+        self.allow_redirects = allow_redirects
+        self.auth = auth
+        self.verify = verify
+        self.flags = flags or []
+        self.encoding = encoding
+        self.cb_kwargs = cb_kwargs or {}
+        self.body = body
+        # keep the high-level semantic parameters (used by copy())
+        self._json_body = json_body
+        self._form_data = form_data
+
+        # build the body
+        if json_body is not None:
+            if 'Content-Type' not in self.headers:
+                self.headers['Content-Type'] = 'application/json'
+            self.body = json.dumps(json_body, ensure_ascii=False).encode(encoding)
+            if self.method == 'GET':
+                self.method = 'POST'
+
+        elif form_data is not None:
+            if self.method == 'GET':
+                self.method = 'POST'
+            if 'Content-Type' not in self.headers:
+                self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
+            query_str = urlencode(form_data)
+            self.body = query_str.encode(encoding)  # explicitly encode to bytes
+
+        else:
+            # handle a raw body
+            if isinstance(self.body, dict):
+                if 'Content-Type' not in self.headers:
+                    self.headers['Content-Type'] = 'application/json'
+                self.body = json.dumps(self.body, ensure_ascii=False).encode(encoding)
+            elif isinstance(self.body, str):
+                self.body = self.body.encode(encoding)
+
+        self.dont_filter = dont_filter
+        self._set_url(url)
+
+    def copy(self: _Request) -> _Request:
+        """
+        Create a copy of this request, preserving the high-level semantics (json_body/form_data).
+        """
+        return type(self)(
+            url=self.url,
+            callback=self.callback,
+            method=self.method,
+            headers=self.headers.copy(),
+            body=None,  # regenerated from form_data/json_body
+            form_data=self._form_data,
+            json_body=self._json_body,
+            cb_kwargs=deepcopy(self.cb_kwargs),
+            err_back=self.err_back,
+            cookies=self.cookies.copy(),
+            meta=deepcopy(self._meta),
+            priority=-self.priority,
+            dont_filter=self.dont_filter,
+            timeout=self.timeout,
+            proxy=self.proxy,
+            allow_redirects=self.allow_redirects,
+            auth=self.auth,
+            verify=self.verify,
+            flags=self.flags.copy(),
+            encoding=self.encoding
+        )
+
+    def set_meta(self, key: str, value: Any) -> None:
+        """Set a single key in meta."""
+        self._meta[key] = value
+
+    def _set_url(self, url: str) -> None:
+        """Safely set the URL, validating its format."""
+        if not isinstance(url, str):
+            raise TypeError(f"Request url must be a string, got: {type(url).__name__}")
+
+        s = safe_url_string(url, self.encoding)
+        self._url = escape_ajax(s)
+
+        if not self._url.startswith(('http://', 'https://')):
+            raise ValueError(f"URL is missing a scheme: {self._url}")
+
+    @property
+    def url(self) -> str:
+        return self._url
+
+    @property
+    def meta(self) -> Dict[str, Any]:
+        return self._meta
+
+    def __str__(self) -> str:
+        return f'<Request url={self.url} method={self.method}>'
+
+    def __repr__(self) -> str:
+        return str(self)
+
+    def __lt__(self, other: _Request) -> bool:
+        """Order requests by priority."""
+        return self.priority < other.priority
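For context, here is a minimal usage sketch of the new Request API shown above. The constructor arguments and copy() behaviour come from the diff itself; the callback body and URL are hypothetical, and the import path is an assumption based on the file location.

    from crawlo.network.request import Request, RequestPriority

    def parse_search(response):
        # hypothetical callback; a real spider would parse the response here
        print(response.url)

    # json_body serializes the payload, sets Content-Type, and upgrades GET to POST
    req = Request(
        url="https://example.com/api/search",
        json_body={"keyword": "crawlo", "page": 1},
        callback=parse_search,
        priority=RequestPriority.HIGH,
        meta={"download_timeout": 10},
    )
    assert req.method == "POST"
    assert req.headers["Content-Type"] == "application/json"
    assert req.timeout == 10  # read back from meta['download_timeout']

    # copy() rebuilds the body from the stored json_body/form_data
    clone = req.copy()
    assert clone.body == req.body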
crawlo/network/response.py
CHANGED
@@ -1,93 +1,166 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-import re
-import ujson
-from typing import Dict
-from parsel import Selector
-from http.cookies import SimpleCookie
-from urllib.parse import urljoin as _urljoin
-
-from crawlo import Request
-from crawlo.exceptions import DecodeError
-
-class Response
- (the rest of the old Response implementation did not survive extraction and is not recoverable)
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+import re
+import ujson
+from typing import Dict, Any, List, Optional
+from parsel import Selector, SelectorList
+from http.cookies import SimpleCookie
+from urllib.parse import urljoin as _urljoin
+
+from crawlo import Request
+from crawlo.exceptions import DecodeError
+
+
+class Response:
+    """
+    Wraps an HTTP response and provides convenience methods for data extraction.
+    """
+
+    def __init__(
+        self,
+        url: str,
+        *,
+        headers: Dict[str, Any],
+        body: bytes = b"",
+        method: str = 'GET',
+        request: Optional[Request] = None,
+        status_code: int = 200,
+    ):
+        self.url = url
+        self.headers = headers
+        self.body = body
+        self.method = method
+        self.request = request
+        self.status_code = status_code
+        self.encoding = self.request.encoding if self.request else None
+        self._text_cache = None
+        self._json_cache = None
+        self._selector_instance = None  # distinct name to avoid clashing with the _selector property
+
+    @property
+    def text(self) -> str:
+        """Decode the response body to a string using the best available encoding, caching the result."""
+        if self._text_cache is not None:
+            return self._text_cache
+
+        encoding = self.encoding
+        try:
+            # prefer the encoding supplied by the request
+            if encoding:
+                self._text_cache = self.body.decode(encoding)
+                return self._text_cache
+
+            # fall back to the charset declared in Content-Type
+            content_type = self.headers.get("Content-Type", "")
+            charset_match = re.search(r"charset=([\w-]+)", content_type, re.I)
+            if charset_match:
+                encoding = charset_match.group(1)
+                self._text_cache = self.body.decode(encoding)
+                return self._text_cache
+
+            # finally, try UTF-8
+            self._text_cache = self.body.decode("utf-8")
+            return self._text_cache
+
+        except UnicodeDecodeError as e:
+            raise DecodeError(f"Failed to decode response from {self.url}: {e}")
+
+    def json(self) -> Any:
+        """Parse the response text as JSON, caching the result."""
+        if self._json_cache is not None:
+            return self._json_cache
+        self._json_cache = ujson.loads(self.text)
+        return self._json_cache
+
+    def urljoin(self, url: str) -> str:
+        """Join a (possibly relative) URL against the response URL."""
+        return _urljoin(self.url, url)
+
+    @property
+    def _selector(self) -> Selector:
+        """Lazily build the Selector instance."""
+        if self._selector_instance is None:
+            self._selector_instance = Selector(self.text)
+        return self._selector_instance
+
+    def xpath(self, query: str) -> SelectorList:
+        """Query the document with an XPath selector."""
+        return self._selector.xpath(query)
+
+    def css(self, query: str) -> SelectorList:
+        """Query the document with a CSS selector."""
+        return self._selector.css(query)
+
+    def xpath_text(self, query: str) -> str:
+        """Extract plain text via XPath."""
+        fragments = self.xpath(f"{query}//text()").getall()
+        return " ".join(text.strip() for text in fragments if text.strip())
+
+    def css_text(self, query: str) -> str:
+        """Extract plain text via a CSS selector."""
+        fragments = self.css(f"{query} ::text").getall()
+        return " ".join(text.strip() for text in fragments if text.strip())
+
+    def get_text(self, xpath_or_css: str, join_str: str = " ") -> str:
+        """
+        Get the plain text of the selected node, concatenating child-node text.
+
+        :param xpath_or_css: an XPath or CSS selector
+        :param join_str: separator used to join text fragments (space by default)
+        :return: the concatenated plain-text string
+        """
+        elements = self.xpath(xpath_or_css) if xpath_or_css.startswith(('/', '//', './')) else self.css(xpath_or_css)
+        texts = elements.xpath('.//text()').getall()
+        return join_str.join(text.strip() for text in texts if text.strip())
+
+    def get_all_text(self, xpath_or_css: str, join_str: str = " ") -> List[str]:
+        """
+        Get the plain text of multiple nodes as a list.
+
+        :param xpath_or_css: an XPath or CSS selector
+        :param join_str: separator used to join fragments within a single node
+        :return: a list of plain-text strings, one per node
+        """
+        elements = self.xpath(xpath_or_css) if xpath_or_css.startswith(('/', '//', './')) else self.css(xpath_or_css)
+        result = []
+        for element in elements:
+            texts = element.xpath('.//text()').getall()
+            clean_text = join_str.join(text.strip() for text in texts if text.strip())
+            if clean_text:
+                result.append(clean_text)
+        return result
+
+    def re_search(self, pattern: str, flags: int = re.DOTALL) -> Optional[re.Match]:
+        """Run a regular-expression search over the response text."""
+        if not isinstance(pattern, str):
+            raise TypeError("Pattern must be a string")
+        return re.search(pattern, self.text, flags=flags)
+
+    def re_findall(self, pattern: str, flags: int = re.DOTALL) -> List[Any]:
+        """Run a regular-expression findall over the response text."""
+        if not isinstance(pattern, str):
+            raise TypeError("Pattern must be a string")
+        return re.findall(pattern, self.text, flags=flags)
+
+    def get_cookies(self) -> Dict[str, str]:
+        """Parse and return cookies from the response headers."""
+        cookie_header = self.headers.get("Set-Cookie", "")
+        if isinstance(cookie_header, list):
+            cookie_header = ", ".join(cookie_header)
+        cookies = SimpleCookie()
+        cookies.load(cookie_header)
+        return {key: morsel.value for key, morsel in cookies.items()}
+
+    @property
+    def meta(self) -> Dict:
+        """The meta dict of the associated Request, or an empty dict."""
+        return self.request.meta if self.request else {}
+
+    def __str__(self):
+        return f"<{self.status_code} {self.url}>"
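A short sketch of the Response helpers above, driven with inline HTML. In the framework the downloader builds Response objects; constructing one directly like this is only for illustration, and the HTML and URL are made up.

    from crawlo.network.response import Response

    html = b"<html><body><h1>Hello</h1><p>crawlo <b>rocks</b></p></body></html>"
    resp = Response(
        url="https://example.com/",
        headers={"Content-Type": "text/html; charset=utf-8"},
        body=html,
    )

    print(resp.css("h1::text").get())  # "Hello"
    print(resp.css_text("p"))          # "crawlo rocks" (child text concatenated)
    print(resp.get_text("//p"))        # same query, XPath form
    print(resp.urljoin("/next"))       # "https://example.com/next"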
crawlo/pipelines/__init__.py
CHANGED
@@ -1,13 +1,13 @@
(the removed and added sides of this file are textually identical; the recorded +13/-13 change leaves the code itself unchanged, likely a line-ending or whitespace rewrite)

#!/usr/bin/python
# -*- coding:UTF-8 -*-
from crawlo.items.items import Item


class BasePipeline:

    def process_item(self, item: Item, spider):
        raise NotImplementedError

    @classmethod
    def create_instance(cls, crawler):
        return cls()
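Although this file is effectively unchanged, it defines the extension point for item pipelines. Below is a minimal sketch of a custom pipeline built on it; the PrintPipeline name and its print-based behaviour are hypothetical, and returning the item follows the Scrapy-style convention rather than anything this diff confirms.

    from crawlo.pipelines import BasePipeline
    from crawlo.items.items import Item

    class PrintPipeline(BasePipeline):
        """Hypothetical pipeline that prints every item it receives."""

        def process_item(self, item: Item, spider):
            print(f"[{spider}] {item}")
            return item  # assumption: pass the item on, Scrapy-style

        @classmethod
        def create_instance(cls, crawler):
            # the crawler argument would expose settings/stats; unused here
            return cls()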