crawlo-1.0.0-py3-none-any.whl → crawlo-1.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__init__.py +1 -0
- crawlo/__version__.py +1 -1
- crawlo/core/engine.py +9 -7
- crawlo/core/processor.py +1 -1
- crawlo/core/scheduler.py +32 -8
- crawlo/downloader/playwright_downloader.py +161 -0
- crawlo/extension/log_stats.py +4 -4
- crawlo/filters/__init__.py +37 -0
- crawlo/filters/aioredis_filter.py +130 -0
- crawlo/filters/memory_filter.py +203 -0
- crawlo/filters/redis_filter.py +120 -0
- crawlo/items/__init__.py +40 -2
- crawlo/items/items.py +36 -5
- crawlo/middleware/retry.py +7 -2
- crawlo/network/request.py +121 -18
- crawlo/pipelines/console_pipeline.py +28 -8
- crawlo/pipelines/mongo_pipeline.py +114 -2
- crawlo/pipelines/mysql_batch_pipline.py +134 -0
- crawlo/pipelines/mysql_pipeline.py +173 -2
- crawlo/pipelines/pipeline_manager.py +3 -3
- crawlo/settings/default_settings.py +51 -1
- crawlo/spider/__init__.py +2 -2
- crawlo/utils/date_tools.py +165 -8
- crawlo/utils/func_tools.py +74 -14
- crawlo/utils/pqueue.py +166 -8
- crawlo/utils/project.py +3 -2
- crawlo/utils/request.py +85 -0
- crawlo/utils/url.py +40 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/METADATA +2 -2
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/RECORD +34 -26
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/WHEEL +0 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/entry_points.txt +0 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/licenses/LICENSE +0 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/top_level.txt +0 -0
crawlo/filters/memory_filter.py
ADDED

@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+import os
+import threading
+from weakref import WeakSet
+from typing import Set, TextIO, Optional
+
+from crawlo import Request
+from crawlo.filters import BaseFilter
+from crawlo.utils.log import get_logger
+from crawlo.utils.request import request_fingerprint
+
+
+class MemoryFilter(BaseFilter):
+    """Efficient in-memory request deduplication filter for single-machine crawlers"""
+
+    def __init__(self, crawler):
+        """
+        Initialize the memory filter
+
+        :param crawler: crawler instance, used to read settings
+        """
+        self.fingerprints: Set[str] = set()  # primary fingerprint store
+        self._temp_weak_refs = WeakSet()  # weak-reference temporary store (optional)
+
+        debug = crawler.settings.get_bool('FILTER_DEBUG', False)
+        logger = get_logger(
+            self.__class__.__name__,  # use the class name instead of a literal string
+            crawler.settings.get('LOG_LEVEL', 'INFO')
+        )
+        super().__init__(logger, crawler.stats, debug)
+
+        # performance counters
+        self._dupe_count = 0
+        self._unique_count = 0
+
+    def add_fingerprint(self, fp: str) -> None:
+        """
+        Add a request fingerprint
+
+        :param fp: request fingerprint string
+        :raises TypeError: if the fingerprint is not a string
+        """
+        if not isinstance(fp, str):
+            raise TypeError(f"Fingerprint must be a string, got {type(fp)}")
+
+        self.fingerprints.add(fp)
+        self._unique_count += 1
+        # self.logger.debug(f"Added fingerprint: {fp[:10]}...")  # truncated to keep logs short
+
+    def requested(self, request: Request) -> bool:
+        """
+        Check whether a request is a duplicate (primary interface)
+
+        :param request: request object
+        :return: whether it is a duplicate
+        """
+        fp = request_fingerprint(request)
+        if fp in self:
+            self._dupe_count += 1
+            # self.logger.debug(f"Duplicate request found: {fp[:10]}...")
+            return True
+
+        self.add_fingerprint(fp)
+        return False
+
+    def __contains__(self, item: str) -> bool:
+        """
+        Support membership checks via the `in` operator
+
+        :param item: fingerprint to check
+        :return: whether it already exists
+        """
+        return item in self.fingerprints
+
+    @property
+    def stats_summary(self) -> dict:
+        """Return filter statistics"""
+        return {
+            'capacity': len(self.fingerprints),
+            'duplicates': self._dupe_count,
+            'uniques': self._unique_count,
+            'memory_usage': self._estimate_memory()
+        }
+
+    def _estimate_memory(self) -> str:
+        """Estimate memory usage (approximate)"""
+        avg_item_size = sum(len(x) for x in self.fingerprints) / max(1, len(self.fingerprints))
+        total = len(self.fingerprints) * (avg_item_size + 50)  # ~50 bytes of per-item overhead
+        return f"{total / (1024 * 1024):.2f} MB"
+
+    def clear(self) -> None:
+        """Clear all fingerprint data"""
+        self.fingerprints.clear()
+        self._dupe_count = 0
+        self._unique_count = 0
+        self.logger.info("Memory filter cleared")
+
+    def close(self) -> None:
+        """Close the filter (release resources)"""
+        self.clear()
+
+    # backwards compatibility with the old async interface
+    async def closed(self):
+        """Async-compatible close"""
+        self.close()
+
+
+class MemoryFileFilter(BaseFilter):
+    """In-memory request fingerprint filter with atomic file persistence"""
+
+    def __init__(self, crawler):
+        """
+        Initialize the filter
+        :param crawler: Scrapy Crawler object, used to read settings
+        """
+        self.fingerprints: Set[str] = set()  # primary storage set
+        self._lock = threading.RLock()  # thread-safety lock
+        self._file: Optional[TextIO] = None  # file handle
+
+        debug = crawler.settings.get_bool("FILTER_DEBUG", False)
+        logger = get_logger(
+            self.__class__.__name__,  # use the class name as the log tag
+            crawler.settings.get("LOG_LEVEL", "INFO")
+        )
+        super().__init__(logger, crawler.stats, debug)
+
+        # initialize file-backed storage
+        request_dir = crawler.settings.get("REQUEST_DIR")
+        if request_dir:
+            self._init_file_store(request_dir)
+
+    def _init_file_store(self, request_dir: str) -> None:
+        """Atomically initialize the file store"""
+        with self._lock:
+            try:
+                os.makedirs(request_dir, exist_ok=True)
+                file_path = os.path.join(request_dir, 'request_fingerprints.txt')
+
+                # atomic step: load existing fingerprints
+                if os.path.exists(file_path):
+                    with open(file_path, 'r', encoding='utf-8') as f:
+                        self.fingerprints.update(
+                            line.strip() for line in f
+                            if line.strip()
+                        )
+
+                # open the file in append mode
+                self._file = open(file_path, 'a+', encoding='utf-8')
+                self.logger.info(f"Initialized fingerprint file: {file_path}")
+
+            except Exception as e:
+                self.logger.error(f"Failed to init file store: {str(e)}")
+                raise
+
+    def add_fingerprint(self, fp: str) -> None:
+        """
+        Thread-safe fingerprint insertion
+        :param fp: request fingerprint string
+        """
+        with self._lock:
+            if fp not in self.fingerprints:
+                self.fingerprints.add(fp)
+                self._persist_fp(fp)
+
+    def _persist_fp(self, fp: str) -> None:
+        """Persist a fingerprint to the file (call while holding the lock)"""
+        if self._file:
+            try:
+                self._file.write(f"{fp}\n")
+                self._file.flush()
+                os.fsync(self._file.fileno())  # make sure the write reaches disk
+            except IOError as e:
+                self.logger.error(f"Failed to persist fingerprint: {str(e)}")
+
+    def __contains__(self, item: str) -> bool:
+        """
+        Thread-safe fingerprint lookup
+        :param item: fingerprint to check
+        :return: whether it already exists
+        """
+        with self._lock:
+            return item in self.fingerprints
+
+    def close(self) -> None:
+        """Safely release resources (synchronous method)"""
+        with self._lock:
+            if self._file and not self._file.closed:
+                try:
+                    self._file.flush()
+                    os.fsync(self._file.fileno())
+                finally:
+                    self._file.close()
+                    self.logger.info(f"Closed fingerprint file: {self._file.name}")
+
+    def __del__(self):
+        """Destructor as a second safety net"""
+        self.close()
+
+    # async-compatible interface
+    async def closed(self):
+        """Standard close entry point"""
+        self.close()
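For context on what MemoryFilter.requested() does, here is a minimal standalone sketch of the check-then-add contract; `fingerprints` stands in for the filter's internal set, and nothing below is crawlo API:

# Check-then-add: report duplicates, remember first sightings.
fingerprints = set()

def requested(fp: str) -> bool:
    if fp in fingerprints:   # O(1) membership test on a Python set
        return True
    fingerprints.add(fp)     # first sighting: record it
    return False

assert requested("abc123") is False  # first time through: kept
assert requested("abc123") is True   # second time: filtered out
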
crawlo/filters/redis_filter.py
ADDED

@@ -0,0 +1,120 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+import redis
+
+from crawlo import Request
+from crawlo.filters import BaseFilter
+from crawlo.utils.log import get_logger
+from crawlo.utils.request import request_fingerprint
+
+
+class RedisFilter(BaseFilter):
+    """Synchronous request deduplication filter built on a Redis set"""
+
+    def __init__(
+            self,
+            redis_key: str,
+            client: redis.Redis,
+            stats: dict,
+            debug: bool,
+            log_level: str,
+            save_fp: bool
+    ):
+        """
+        Initialize the filter
+
+        :param redis_key: Redis key used for storage
+        :param client: redis client instance
+        :param stats: stats dictionary
+        :param debug: whether debug mode is enabled
+        :param log_level: log level
+        :param save_fp: whether to keep the fingerprint data
+        """
+        self.logger = get_logger(self.__class__.__name__, log_level)
+        super().__init__(self.logger, stats, debug)
+
+        self.redis_key = redis_key
+        self.redis = client
+        self.save_fp = save_fp
+
+    @classmethod
+    def create_instance(cls, crawler) -> 'BaseFilter':
+        """Factory method for creating an instance"""
+        redis_url = crawler.settings.get('REDIS_URL', 'redis://localhost:6379')
+        decode_responses = crawler.settings.get_bool('DECODE_RESPONSES', True)
+
+        try:
+            # configure the connection pool
+            redis_client = redis.from_url(
+                redis_url,
+                decode_responses=decode_responses,
+                socket_timeout=5,  # timeouts
+                socket_connect_timeout=5,
+                max_connections=20  # pool size
+            )
+            # verify the connection works
+            redis_client.ping()
+        except redis.RedisError as e:
+            raise RuntimeError(f"Redis connection failed: {str(e)}")
+
+        return cls(
+            redis_key=f"{crawler.settings.get('PROJECT_NAME')}:{crawler.settings.get('REDIS_KEY', 'request_fingerprints')}",
+            client=redis_client,
+            stats=crawler.stats,
+            save_fp=crawler.settings.get_bool('SAVE_FP', False),
+            debug=crawler.settings.get_bool('FILTER_DEBUG', False),
+            log_level=crawler.settings.get('LOG_LEVEL', 'INFO')
+        )
+
+    def requested(self, request: Request) -> bool:
+        """
+        Check whether the request already exists
+
+        :param request: request object
+        :return: whether it is a duplicate
+        """
+        fp = request_fingerprint(request)
+        try:
+            if self.redis.sismember(self.redis_key, fp):
+                self.logger.debug(f"Duplicate request: {fp}")
+                return True
+
+            self.add_fingerprint(fp)
+            return False
+        except redis.RedisError as e:
+            self.logger.error(f"Redis operation failed: {str(e)}")
+            raise
+
+    def add_fingerprint(self, fp: str) -> None:
+        """Add a fingerprint to the Redis set"""
+        try:
+            self.redis.sadd(self.redis_key, fp)
+            self.logger.debug(f"Added fingerprint: {fp}")
+        except redis.RedisError as e:
+            self.logger.error(f"Failed to add fingerprint: {str(e)}")
+            raise
+
+    def __contains__(self, item) -> bool:
+        """Support the `in` operator (must return a bool)"""
+        try:
+            # explicitly convert the 0/1 Redis reply to bool
+            return bool(self.redis.sismember(self.redis_key, item))
+        except redis.RedisError as e:
+            self.logger.error(f"Redis lookup failed: {str(e)}")
+            raise
+
+    def close(self) -> None:
+        """Synchronous cleanup (note: not the async `closed`)"""
+        if not self.save_fp:
+            try:
+                count = self.redis.delete(self.redis_key)
+                self.logger.info(f"Cleared Redis key {self.redis_key}, deleted: {count}")
+            except redis.RedisError as e:
+                self.logger.error(f"Cleanup failed: {str(e)}")
+            finally:
+                # the synchronous client's connection pool must be closed manually
+                self.redis.close()
+
+    async def closed(self):
+        """Synchronous implementation behind the async-compatible interface"""
+        self.close()
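One design note on RedisFilter.requested(): the sismember-then-sadd pair costs two round trips and is not atomic, so under concurrency two workers can both see "new" for the same fingerprint. A hedged alternative sketch, not crawlo's code: SADD's integer reply (1 = newly added, 0 = already present) collapses the check and the insert into one atomic call. Assumes a reachable local Redis; key and value are illustrative.

import redis

client = redis.from_url("redis://localhost:6379", decode_responses=True)

def requested(redis_key: str, fp: str) -> bool:
    # SADD returns 0 when fp was already a member, i.e. a duplicate.
    return client.sadd(redis_key, fp) == 0

print(requested("demo:request_fingerprints", "abc123"))  # False on first call
print(requested("demo:request_fingerprints", "abc123"))  # True afterwards
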
crawlo/items/__init__.py
CHANGED
@@ -1,10 +1,48 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
 from abc import ABCMeta
+from typing import Any, Optional, Type
 
 
-class Field
-
+class Field:
+    def __init__(
+            self,
+            nullable: bool = True,
+            *,
+            default: Any = None,
+            field_type: Optional[Type] = None,
+            max_length: Optional[int] = None,
+            description: str = ""
+    ):
+        self.nullable = nullable
+        self.default = default
+        self.field_type = field_type
+        self.max_length = max_length
+        self.description = description
+
+    def validate(self, value: Any, field_name: str = "") -> Any:
+        if value is None or (isinstance(value, str) and value.strip() == ""):
+            if self.default is not None:
+                return self.default
+            elif not self.nullable:
+                raise ValueError(
+                    f"Field '{field_name}' must not be empty."
+                )
+
+        if value is not None and not (isinstance(value, str) and value.strip() == ""):
+            if self.field_type and not isinstance(value, self.field_type):
+                raise TypeError(
+                    f"Field '{field_name}' has the wrong type: expected {self.field_type}, got {type(value)}, value: {value!r}"
+                )
+            if self.max_length and len(str(value)) > self.max_length:
+                raise ValueError(
+                    f"Field '{field_name}' exceeds the length limit: max {self.max_length}, current {len(str(value))}, value: {value!r}"
+                )
+
+        return value
+
+    def __repr__(self):
+        return f"<Field required={self.nullable} type={self.field_type} default={self.default}>"
 
 
 class ItemMeta(ABCMeta):
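To make the validation rules above concrete, a small sketch exercising Field.validate() with illustrative values (the field names and values are arbitrary):

from crawlo.items import Field

title = Field(default="untitled", field_type=str, max_length=10)
print(title.validate("", field_name="title"))       # empty -> falls back to 'untitled'
print(title.validate("short", field_name="title"))  # passes the type and length checks

url = Field(nullable=False, field_type=str)
try:
    url.validate(None, field_name="url")
except ValueError as e:
    print(e)  # Field 'url' must not be empty.
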
crawlo/items/items.py
CHANGED
@@ -21,13 +21,44 @@ class Item(MutableMapping, metaclass=ItemMeta):
 
         self._values: Dict[str, Any] = {}
 
+        # initialize fields, filling in defaults
+        for field_name, field_obj in self.FIELDS.items():
+            if field_obj.default is not None:
+                self._values[field_name] = field_obj.default
+
+        # override defaults or set new values
+        for key, value in kwargs.items():
+            self[key] = value
+
     def __getitem__(self, item: str) -> Any:
         return self._values[item]
 
+    # def __setitem__(self, key: str, value: Any) -> None:
+    #     if key not in self.FIELDS:
+    #         raise KeyError(f"{self.__class__.__name__} has no field: {key}")
+    #     self._values[key] = value
+
     def __setitem__(self, key: str, value: Any) -> None:
         if key not in self.FIELDS:
             raise KeyError(f"{self.__class__.__name__} has no field: {key}")
-
+
+        field = self.FIELDS[key]
+        try:
+            validated_value = field.validate(value, field_name=key)
+            self._values[key] = validated_value
+        except Exception as e:
+            error_lines = [
+                "",
+                "[Field validation failed]",
+                f"Field name: {key}",
+                f"Value type: {type(value)}",
+                f"Raw value: {repr(value)}",
+                f"Nullable: {field.nullable}",
+                f"Reason: {str(e)}",
+                ""
+            ]
+            detailed_error = "\n".join(error_lines)
+            raise type(e)(detailed_error) from e
 
     def __delitem__(self, key: str) -> None:
         del self._values[key]
@@ -78,11 +109,11 @@ class Item(MutableMapping, metaclass=ItemMeta):
 
 if __name__ == '__main__':
     class TestItem(Item):
-        url = Field()
-        title = Field()
+        url = Field(nullable=False, field_type=str, max_length=100)
+        title = Field(default="无标题", field_type=str)
 
     test_item = TestItem()
     test_item['title'] = '百度首页'
-    test_item['url'] = '
+    test_item['url'] = 'hhh'
     # test_item.title = 'fffff'
-    print(test_item
+    print(test_item)
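The __main__ demo above drives the same path; the sketch below, with hypothetical values, shows how an invalid assignment surfaces the detailed error report assembled in __setitem__:

from crawlo import Item
from crawlo.items import Field

class TestItem(Item):
    url = Field(nullable=False, field_type=str, max_length=100)
    title = Field(default="无标题", field_type=str)

item = TestItem()
item['url'] = 'hhh'      # validated: a str no longer than 100 chars
try:
    item['url'] = 12345  # wrong type for this field
except TypeError as e:
    print(e)             # the multi-line "[Field validation failed]" report
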
crawlo/middleware/retry.py
CHANGED
@@ -36,12 +36,14 @@ class RetryMiddleware(object):
             ignore_http_codes: List,
             max_retry_times: int,
             retry_exceptions: List,
-            stats: StatsCollector
+            stats: StatsCollector,
+            retry_priority: int
     ):
         self.retry_http_codes = retry_http_codes
         self.ignore_http_codes = ignore_http_codes
         self.max_retry_times = max_retry_times
         self.retry_exceptions = tuple(retry_exceptions + _retry_exceptions)
+        self.retry_priority = retry_priority
         self.stats = stats
         self.logger = get_logger(self.__class__.__name__)
 
@@ -52,7 +54,8 @@ class RetryMiddleware(object):
             ignore_http_codes=crawler.settings.get_list('IGNORE_HTTP_CODES'),
             max_retry_times=crawler.settings.get_int('MAX_RETRY_TIMES'),
             retry_exceptions=crawler.settings.get_list('RETRY_EXCEPTIONS'),
-            stats=crawler.stats
+            stats=crawler.stats,
+            retry_priority=crawler.settings.get_int('RETRY_PRIORITY')
         )
         return o
 
@@ -77,6 +80,8 @@ class RetryMiddleware(object):
             retry_times += 1
             self.logger.info(f"{spider} {request} {reason} retrying {retry_times} time...")
             request.meta['retry_times'] = retry_times
+            request.dont_retry = True
+            request.retry_priority = request.priority + self.retry_priority
             self.stats.inc_value("retry_count")
             return request
         else:
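A quick arithmetic sketch of what the new retry_priority line computes, with an assumed RETRY_PRIORITY value. Request stores its priority negated (see the request.py hunk below), so assuming the scheduler's queue pops the smallest value first, as heap-based queues do, a negative RETRY_PRIORITY moves retries ahead of their peers:

RETRY_PRIORITY = -1                          # assumed settings value, for illustration only

internal = -10                               # a request created with priority=10
retry_priority = internal + RETRY_PRIORITY   # -11: scheduled before its -10 peers
print(retry_priority)
# A positive RETRY_PRIORITY would instead push retries behind fresh requests.
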
crawlo/network/request.py
CHANGED
@@ -2,7 +2,10 @@
 # -*- coding:UTF-8 -*-
 import hashlib
 from copy import deepcopy
-from
+from w3lib.url import safe_url_string
+from typing import Dict, Optional, Callable, Union, Any
+
+from crawlo.utils.url import escape_ajax
 
 
 class Request(object):
@@ -13,40 +16,140 @@ class Request(object):
             *,
             callback: Optional[Callable] = None,
             headers: Optional[Dict[str, str]] = None,
-            body: Optional[bytes] = None,
+            body: Optional[Union[Dict, bytes, str]] = None,
             method: Optional[str] = 'GET',
             cookies: Optional[Dict[str, str]] = None,
             priority: int = 0,
-            encoding: Optional[str] = '
-            meta: Optional[Dict[str,
-
+            encoding: Optional[str] = 'utf-8',
+            meta: Optional[Dict[str, Any]] = None,
+            dont_filter: bool = False,
+            timeout: Optional[float] = None,
+            proxy: Optional[str] = None,
+            allow_redirects: bool = True,
+            auth: Optional[tuple] = None,
+            verify: bool = True
     ):
-
+        # initialize the simple attributes first
         self.callback = callback
         self.headers = headers if headers else {}
         self.body = body
-        self.method = method
-        self.cookies = cookies
-        self.priority = priority
+        self.method = str(method).upper()
+        self.cookies = cookies if cookies else {}
+        self.priority = -priority
         self.encoding = encoding
+        self.dont_filter = dont_filter
         self._meta = meta if meta is not None else {}
+        self.timeout = timeout
+        self.proxy = proxy
+        self.allow_redirects = allow_redirects
+        self.auth = auth
+        self.verify = verify
+
+        # set the URL last, so attributes it depends on (e.g. encoding) are initialized
+        self._set_url(url)
 
     def copy(self):
         return deepcopy(self)
 
-    def
-        data = f"{self.url}{self.method}{self.body or b''}".encode()
-        return hashlib.sha256(data).hexdigest()
-
-    def set_meta(self, key: str, value: str):
+    def set_meta(self, key: str, value: Any) -> None:
         self._meta[key] = value
 
+    def _set_url(self, url: str) -> None:
+        if not isinstance(url, str):
+            raise TypeError(f"Request url must be str, got {type(url).__name__}")
+
+        s = safe_url_string(url, self.encoding)
+        self._url = escape_ajax(s)
+
+        if (
+            "://" not in self._url
+            and not self._url.startswith("about:")
+            and not self._url.startswith("data:")
+        ):
+            raise ValueError(f"Missing scheme in request url: {self._url}")
+
+    @property
+    def url(self) -> str:
+        return self._url
+
     @property
-    def meta(self):
+    def meta(self) -> Dict[str, Any]:
         return self._meta
 
-    def __str__(self):
-        return f'<Request url={self.url}
+    def __str__(self) -> str:
+        return f'<Request url={self.url} method={self.method}>'
 
-    def
+    def __repr__(self) -> str:
+        return self.__str__()
+
+    def __lt__(self, other) -> bool:
         return self.priority < other.priority
+
+
+# #!/usr/bin/python
+# # -*- coding:UTF-8 -*-
+# import hashlib
+# from copy import deepcopy
+# from w3lib.url import safe_url_string
+# from typing import Dict, Optional, Callable, Union
+#
+# from crawlo.utils.url import escape_ajax
+#
+#
+# class Request(object):
+#
+#     def __init__(
+#             self,
+#             url: str,
+#             *,
+#             callback: Optional[Callable] = None,
+#             headers: Optional[Dict[str, str]] = None,
+#             body: Optional[Dict] = None,
+#             method: Optional[str] = 'GET',
+#             cookies: Optional[Dict[str, str]] = None,
+#             priority: int = 0,
+#             encoding: Optional[str] = 'UTF-8',
+#             meta: Optional[Dict[str, str]] = None,
+#             dont_filter: bool = False
+#
+#     ):
+#         self.url = url
+#         self.callback = callback
+#         self.headers = headers if headers else {}
+#         self.body = body
+#         self.method = str(method).upper()
+#         self.cookies = cookies
+#         self.priority = -priority
+#         self.encoding = encoding
+#         self.dont_filter = dont_filter
+#         self._meta = meta if meta is not None else {}
+#
+#     def copy(self):
+#         return deepcopy(self)
+#
+#     def set_meta(self, key: str, value: str):
+#         self._meta[key] = value
+#
+#     def _set_url(self, url: str) -> None:
+#         if not isinstance(url, str):
+#             raise TypeError(f"Request url must be str, got {type(url).__name__}")
+#
+#         s = safe_url_string(url, self.encoding)
+#         self._url = escape_ajax(s)
+#
+#         if (
+#             "://" not in self._url
+#             and not self._url.startswith("about:")
+#             and not self._url.startswith("data:")
+#         ):
+#             raise ValueError(f"Missing scheme in request url: {self._url}")
+#
+#     @property
+#     def meta(self):
+#         return self._meta
+#
+#     def __str__(self):
+#         return f'<Request url={self.url}> method={self.method}>'
+#
+#     def __lt__(self, other):
+#         return self.priority < other.priority
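A minimal usage sketch of the reworked constructor (URL and values are illustrative; only arguments shown in the hunk above are used):

from crawlo import Request

req = Request(
    "https://example.com/page?q=1",
    method="get",        # normalized to 'GET' by str(method).upper()
    priority=10,         # stored internally as -10
    timeout=15.0,
)
print(req)               # <Request url=https://example.com/page?q=1 method=GET>

try:
    Request("example.com/no-scheme")   # _set_url rejects scheme-less URLs
except ValueError as e:
    print(e)             # Missing scheme in request url: example.com/no-scheme
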
crawlo/pipelines/console_pipeline.py
CHANGED

@@ -1,20 +1,40 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
+from typing import Dict, Any
+
 from crawlo import Item
 from crawlo.spider import Spider
 from crawlo.utils.log import get_logger
 
 
-class
+class ConsolePipeline:
+    """Pipeline that writes Item contents to the console"""
 
-    def __init__(self,
-        self.logger =
+    def __init__(self, log_level: str = "DEBUG"):
+        self.logger = get_logger(self.__class__.__name__, log_level)
 
     @classmethod
-    def
-
-        return cls(
+    def from_crawler(cls, crawler):
+        """Create the pipeline from a crawler instance"""
+        return cls(
+            log_level=crawler.settings.get('LOG_LEVEL', 'DEBUG')
+        )
 
     async def process_item(self, item: Item, spider: Spider) -> Item:
-
-
+        """Process an Item and write it to the log"""
+        try:
+            item_dict = self._convert_to_serializable(item)
+            self.logger.info(f"Item processed: {item_dict}")
+            return item
+        except Exception as e:
+            self.logger.error(f"Error processing item: {e}", exc_info=True)
+            raise
+
+    @staticmethod
+    def _convert_to_serializable(item: Item) -> Dict[str, Any]:
+        """Convert an Item into a serializable dict"""
+        try:
+            return item.to_dict()
+        except AttributeError:
+            # fall back for Item implementations without a to_dict method
+            return dict(item)