crawlo 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (40)
  1. crawlo/__init__.py +1 -0
  2. crawlo/__version__.py +1 -1
  3. crawlo/core/engine.py +9 -7
  4. crawlo/core/processor.py +1 -1
  5. crawlo/core/scheduler.py +32 -8
  6. crawlo/crawler.py +133 -18
  7. crawlo/downloader/playwright_downloader.py +161 -0
  8. crawlo/extension/log_stats.py +4 -4
  9. crawlo/filters/__init__.py +37 -0
  10. crawlo/filters/aioredis_filter.py +130 -0
  11. crawlo/filters/memory_filter.py +203 -0
  12. crawlo/filters/redis_filter.py +120 -0
  13. crawlo/items/__init__.py +40 -2
  14. crawlo/items/items.py +36 -5
  15. crawlo/middleware/retry.py +8 -2
  16. crawlo/network/request.py +215 -33
  17. crawlo/network/response.py +122 -53
  18. crawlo/pipelines/console_pipeline.py +28 -8
  19. crawlo/pipelines/mongo_pipeline.py +114 -2
  20. crawlo/pipelines/mysql_batch_pipline.py +134 -0
  21. crawlo/pipelines/mysql_pipeline.py +192 -2
  22. crawlo/pipelines/pipeline_manager.py +3 -3
  23. crawlo/settings/default_settings.py +51 -1
  24. crawlo/spider/__init__.py +2 -2
  25. crawlo/subscriber.py +90 -11
  26. crawlo/utils/concurrency_manager.py +125 -0
  27. crawlo/utils/date_tools.py +165 -8
  28. crawlo/utils/func_tools.py +74 -14
  29. crawlo/utils/pqueue.py +166 -8
  30. crawlo/utils/project.py +3 -2
  31. crawlo/utils/request.py +85 -0
  32. crawlo/utils/tools.py +303 -0
  33. crawlo/utils/url.py +40 -0
  34. {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/METADATA +23 -11
  35. crawlo-1.0.2.dist-info/RECORD +68 -0
  36. crawlo-1.0.0.dist-info/RECORD +0 -59
  37. crawlo-1.0.0.dist-info/licenses/LICENSE +0 -23
  38. {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/WHEEL +0 -0
  39. {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/entry_points.txt +0 -0
  40. {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/top_level.txt +0 -0
crawlo/filters/aioredis_filter.py ADDED
@@ -0,0 +1,130 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from typing import Optional
+
+ import aioredis
+
+ from crawlo import Request
+ from crawlo.filters import BaseFilter
+ from crawlo.utils.log import get_logger
+ from crawlo.utils.request import request_fingerprint
+
+
+ class AioRedisFilter(BaseFilter):
+     """Asynchronous request-deduplication filter backed by a Redis set (for distributed crawlers)."""
+
+     def __init__(
+         self,
+         redis_key: str,
+         client: aioredis.Redis,
+         stats: dict,
+         debug: bool,
+         log_level: str,
+         cleanup_fp: bool = False
+     ):
+         """
+         Initialize the filter.
+
+         Parameters:
+             redis_key: Redis key under which fingerprints are stored
+             client: aioredis client instance
+             stats: statistics dictionary
+             debug: whether debug mode is enabled
+             log_level: logging level
+             cleanup_fp: whether to delete the stored fingerprints when the spider closes
+         """
+         # Initialize the logger (the class name is used as the log identifier)
+         self.logger = get_logger(self.__class__.__name__, log_level)
+         super().__init__(self.logger, stats, debug)
+
+         self.redis_key = redis_key    # Redis storage key (e.g. "project:request_fingerprints")
+         self.redis = client           # asynchronous Redis client
+         self.cleanup_fp = cleanup_fp  # whether to drop fingerprint data on close
+
+     @classmethod
+     def create_instance(cls, crawler) -> 'BaseFilter':
+         """Create a filter instance from crawler settings (factory method)."""
+         # Read the Redis connection parameters from settings (with defaults)
+         redis_url = crawler.settings.get('REDIS_URL', 'redis://localhost:6379')
+         decode_responses = crawler.settings.get_bool('DECODE_RESPONSES', True)
+
+         try:
+             # Create the Redis connection pool (capped at 20 connections)
+             redis_client = aioredis.from_url(
+                 redis_url,
+                 decode_responses=decode_responses,
+                 max_connections=20
+             )
+         except Exception as e:
+             raise RuntimeError(f"Redis connection failed {redis_url}: {str(e)}")
+
+         # Combine the project name and the configured key into the Redis key
+         return cls(
+             redis_key=f"{crawler.settings.get('PROJECT_NAME')}:{crawler.settings.get('REDIS_KEY', 'request_fingerprints')}",
+             client=redis_client,
+             stats=crawler.stats,
+             cleanup_fp=crawler.settings.get_bool('CLEANUP_FP', False),
+             debug=crawler.settings.get_bool('FILTER_DEBUG', False),
+             log_level=crawler.settings.get('LOG_LEVEL', 'INFO')
+         )
+
+     async def requested(self, request: Request) -> bool:
+         """
+         Check whether a request is a duplicate.
+
+         Parameters:
+             request: the request to check
+
+         Returns:
+             bool: True for a duplicate request, False for a new one
+         """
+         fp = request_fingerprint(request)  # generate the request fingerprint
+         try:
+             # Check whether the fingerprint is already in the set
+             is_duplicate = await self.redis.sismember(self.redis_key, fp)
+             if is_duplicate:
+                 # self.logger.debug(f"Duplicate request found: {fp}")
+                 return True
+
+             # New request: record its fingerprint
+             await self.add_fingerprint(fp)
+             return False
+         except aioredis.RedisError as e:
+             self.logger.error(f"Redis operation failed: {str(e)}")
+             raise  # re-raise to the caller
+
+     async def add_fingerprint(self, fp: str) -> None:
+         """Add a new fingerprint to the Redis set."""
+         try:
+             await self.redis.sadd(self.redis_key, fp)
+             self.logger.debug(f"Added fingerprint: {fp}")
+         except aioredis.RedisError as e:
+             self.logger.error(f"Failed to add fingerprint: {str(e)}")
+             raise
+
+     async def closed(self, reason: Optional[str] = None) -> None:
+         """
+         Handle spider shutdown (compatible with Scrapy's close logic).
+
+         Parameters:
+             reason: why the spider closed (standard Scrapy argument)
+         """
+         if self.cleanup_fp:  # only clean up when explicitly configured
+             try:
+                 deleted = await self.redis.delete(self.redis_key)
+                 self.logger.info(
+                     f"Cleaned {deleted} fingerprints from {self.redis_key} "
+                     f"(reason: {reason or 'manual'})"
+                 )
+             except aioredis.RedisError as e:
+                 self.logger.warning(f"Cleanup failed: {e}")
+             finally:
+                 await self._close_redis()
+
+     async def _close_redis(self) -> None:
+         """Safely close the Redis connection."""
+         try:
+             await self.redis.close()
+             await self.redis.connection_pool.disconnect()
+         except Exception as e:
+             self.logger.warning(f"Redis close error: {e}")
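All of the filter's inputs come from the settings read in create_instance. A minimal configuration sketch with illustrative values (the project name is invented for the example; how the filter class itself gets selected is outside this diff):

    # settings.py (sketch) -- values consumed by AioRedisFilter.create_instance
    PROJECT_NAME = 'my_project'            # Redis key prefix
    REDIS_URL = 'redis://localhost:6379'   # passed to aioredis.from_url
    REDIS_KEY = 'request_fingerprints'     # full key becomes "my_project:request_fingerprints"
    DECODE_RESPONSES = True
    CLEANUP_FP = False                     # True deletes the fingerprint set when the spider closes
    FILTER_DEBUG = False
    LOG_LEVEL = 'INFO'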
crawlo/filters/memory_filter.py ADDED
@@ -0,0 +1,203 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import os
+ import threading
+ from weakref import WeakSet
+ from typing import Set, TextIO, Optional
+
+ from crawlo import Request
+ from crawlo.filters import BaseFilter
+ from crawlo.utils.log import get_logger
+ from crawlo.utils.request import request_fingerprint
+
+
+ class MemoryFilter(BaseFilter):
+     """Efficient in-memory request-deduplication filter for single-machine crawlers."""
+
+     def __init__(self, crawler):
+         """
+         Initialize the in-memory filter.
+
+         :param crawler: crawler instance, used to read settings
+         """
+         self.fingerprints: Set[str] = set()  # primary fingerprint store
+         self._temp_weak_refs = WeakSet()     # optional weak-reference scratch store
+
+         debug = crawler.settings.get_bool('FILTER_DEBUG', False)
+         logger = get_logger(
+             self.__class__.__name__,  # use the class name instead of a literal string
+             crawler.settings.get('LOG_LEVEL', 'INFO')
+         )
+         super().__init__(logger, crawler.stats, debug)
+
+         # performance counters
+         self._dupe_count = 0
+         self._unique_count = 0
+
+     def add_fingerprint(self, fp: str) -> None:
+         """
+         Add a request fingerprint.
+
+         :param fp: fingerprint string
+         :raises TypeError: if the fingerprint is not a string
+         """
+         if not isinstance(fp, str):
+             raise TypeError(f"Fingerprint must be a string, got {type(fp)}")
+
+         self.fingerprints.add(fp)
+         self._unique_count += 1
+         # self.logger.debug(f"Added fingerprint: {fp[:10]}...")  # truncated to keep logs short
+
+     def requested(self, request: Request) -> bool:
+         """
+         Check whether a request is a duplicate (main interface).
+
+         :param request: request object
+         :return: whether it is a duplicate
+         """
+         fp = request_fingerprint(request)
+         if fp in self:
+             self._dupe_count += 1
+             # self.logger.debug(f"Duplicate request found: {fp[:10]}...")
+             return True
+
+         self.add_fingerprint(fp)
+         return False
+
+     def __contains__(self, item: str) -> bool:
+         """
+         Support the `in` operator.
+
+         :param item: fingerprint to check
+         :return: whether it already exists
+         """
+         return item in self.fingerprints
+
+     @property
+     def stats_summary(self) -> dict:
+         """Return filter statistics."""
+         return {
+             'capacity': len(self.fingerprints),
+             'duplicates': self._dupe_count,
+             'uniques': self._unique_count,
+             'memory_usage': self._estimate_memory()
+         }
+
+     def _estimate_memory(self) -> str:
+         """Estimate memory usage (approximate)."""
+         avg_item_size = sum(len(x) for x in self.fingerprints) / max(1, len(self.fingerprints))
+         total = len(self.fingerprints) * (avg_item_size + 50)  # 50 bytes of per-item overhead
+         return f"{total / (1024 * 1024):.2f} MB"
+
+     def clear(self) -> None:
+         """Clear all fingerprint data."""
+         self.fingerprints.clear()
+         self._dupe_count = 0
+         self._unique_count = 0
+         self.logger.info("In-memory filter cleared")
+
+     def close(self) -> None:
+         """Close the filter (release resources)."""
+         self.clear()
+
+     # compatibility with the legacy async interface
+     async def closed(self):
+         """Async-compatible close."""
+         self.close()
+
+
+ class MemoryFileFilter(BaseFilter):
+     """In-memory request fingerprint filter with atomic file persistence."""
+
+     def __init__(self, crawler):
+         """
+         Initialize the filter.
+         :param crawler: Scrapy Crawler object, used to read settings
+         """
+         self.fingerprints: Set[str] = set()  # primary storage set
+         self._lock = threading.RLock()       # thread-safety lock
+         self._file: Optional[TextIO] = None  # file handle
+
+         debug = crawler.settings.get_bool("FILTER_DEBUG", False)
+         logger = get_logger(
+             self.__class__.__name__,  # use the class name as the log identifier
+             crawler.settings.get("LOG_LEVEL", "INFO")
+         )
+         super().__init__(logger, crawler.stats, debug)
+
+         # initialize file-backed storage
+         request_dir = crawler.settings.get("REQUEST_DIR")
+         if request_dir:
+             self._init_file_store(request_dir)
+
+     def _init_file_store(self, request_dir: str) -> None:
+         """Initialize file storage atomically."""
+         with self._lock:
+             try:
+                 os.makedirs(request_dir, exist_ok=True)
+                 file_path = os.path.join(request_dir, 'request_fingerprints.txt')
+
+                 # atomic step: load existing fingerprints
+                 if os.path.exists(file_path):
+                     with open(file_path, 'r', encoding='utf-8') as f:
+                         self.fingerprints.update(
+                             line.strip() for line in f
+                             if line.strip()
+                         )
+
+                 # open the file in append mode
+                 self._file = open(file_path, 'a+', encoding='utf-8')
+                 self.logger.info(f"Initialized fingerprint file: {file_path}")
+
+             except Exception as e:
+                 self.logger.error(f"Failed to init file store: {str(e)}")
+                 raise
+
+     def add_fingerprint(self, fp: str) -> None:
+         """
+         Thread-safe fingerprint insertion.
+         :param fp: fingerprint string
+         """
+         with self._lock:
+             if fp not in self.fingerprints:
+                 self.fingerprints.add(fp)
+                 self._persist_fp(fp)
+
+     def _persist_fp(self, fp: str) -> None:
+         """Persist a fingerprint to the file (must be called while holding the lock)."""
+         if self._file:
+             try:
+                 self._file.write(f"{fp}\n")
+                 self._file.flush()
+                 os.fsync(self._file.fileno())  # make sure it reaches disk
+             except IOError as e:
+                 self.logger.error(f"Failed to persist fingerprint: {str(e)}")
+
+     def __contains__(self, item: str) -> bool:
+         """
+         Thread-safe fingerprint lookup.
+         :param item: fingerprint to check
+         :return: whether it already exists
+         """
+         with self._lock:
+             return item in self.fingerprints
+
+     def close(self) -> None:
+         """Safely release resources (synchronous)."""
+         with self._lock:
+             if self._file and not self._file.closed:
+                 try:
+                     self._file.flush()
+                     os.fsync(self._file.fileno())
+                 finally:
+                     self._file.close()
+                     self.logger.info(f"Closed fingerprint file: {self._file.name}")
+
+     def __del__(self):
+         """Destructor as a safety net."""
+         self.close()
+
+     # async-interface compatibility
+     async def closed(self):
+         """Standard close entry point."""
+         self.close()
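Both classes implement the same seen-set pattern: compute a fingerprint for the request, test membership, and record it on a miss. A self-contained sketch of that pattern (hashlib stands in for crawlo's request_fingerprint helper; the function names are illustrative):

    import hashlib

    seen = set()  # plays the role of MemoryFilter.fingerprints

    def fingerprint(method: str, url: str) -> str:
        # simplified stand-in for request_fingerprint: hash of method + URL
        return hashlib.sha1(f"{method} {url}".encode("utf-8")).hexdigest()

    def requested(method: str, url: str) -> bool:
        # True for a duplicate, False (and remember it) for a new request
        fp = fingerprint(method, url)
        if fp in seen:
            return True
        seen.add(fp)
        return False

    print(requested("GET", "https://example.com"))  # False: first sighting
    print(requested("GET", "https://example.com"))  # True: duplicate

MemoryFileFilter adds durability by appending each new fingerprint to request_fingerprints.txt and fsyncing, at the cost of one disk flush per unique request.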
crawlo/filters/redis_filter.py ADDED
@@ -0,0 +1,120 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import redis
+
+ from crawlo import Request
+ from crawlo.filters import BaseFilter
+ from crawlo.utils.log import get_logger
+ from crawlo.utils.request import request_fingerprint
+
+
+ class RedisFilter(BaseFilter):
+     """Synchronous request-deduplication filter backed by a Redis set."""
+
+     def __init__(
+         self,
+         redis_key: str,
+         client: redis.Redis,
+         stats: dict,
+         debug: bool,
+         log_level: str,
+         save_fp: bool
+     ):
+         """
+         Initialize the filter.
+
+         :param redis_key: Redis storage key
+         :param client: redis client instance
+         :param stats: statistics dictionary
+         :param debug: whether debug mode is enabled
+         :param log_level: logging level
+         :param save_fp: whether to keep the fingerprint data on close
+         """
+         self.logger = get_logger(self.__class__.__name__, log_level)
+         super().__init__(self.logger, stats, debug)
+
+         self.redis_key = redis_key
+         self.redis = client
+         self.save_fp = save_fp
+
+     @classmethod
+     def create_instance(cls, crawler) -> 'BaseFilter':
+         """Factory method that builds an instance from crawler settings."""
+         redis_url = crawler.settings.get('REDIS_URL', 'redis://localhost:6379')
+         decode_responses = crawler.settings.get_bool('DECODE_RESPONSES', True)
+
+         try:
+             # configure the connection pool
+             redis_client = redis.from_url(
+                 redis_url,
+                 decode_responses=decode_responses,
+                 socket_timeout=5,  # operation timeout
+                 socket_connect_timeout=5,
+                 max_connections=20  # pool size
+             )
+             # verify the connection works
+             redis_client.ping()
+         except redis.RedisError as e:
+             raise RuntimeError(f"Redis connection failed: {str(e)}")
+
+         return cls(
+             redis_key=f"{crawler.settings.get('PROJECT_NAME')}:{crawler.settings.get('REDIS_KEY', 'request_fingerprints')}",
+             client=redis_client,
+             stats=crawler.stats,
+             save_fp=crawler.settings.get_bool('SAVE_FP', False),
+             debug=crawler.settings.get_bool('FILTER_DEBUG', False),
+             log_level=crawler.settings.get('LOG_LEVEL', 'INFO')
+         )
+
+     def requested(self, request: Request) -> bool:
+         """
+         Check whether a request has been seen before.
+
+         :param request: request object
+         :return: whether it is a duplicate
+         """
+         fp = request_fingerprint(request)
+         try:
+             if self.redis.sismember(self.redis_key, fp):
+                 self.logger.debug(f"Duplicate request: {fp}")
+                 return True
+
+             self.add_fingerprint(fp)
+             return False
+         except redis.RedisError as e:
+             self.logger.error(f"Redis operation failed: {str(e)}")
+             raise
+
+     def add_fingerprint(self, fp: str) -> None:
+         """Add a fingerprint to the Redis set."""
+         try:
+             self.redis.sadd(self.redis_key, fp)
+             self.logger.debug(f"Added fingerprint: {fp}")
+         except redis.RedisError as e:
+             self.logger.error(f"Failed to add fingerprint: {str(e)}")
+             raise
+
+     def __contains__(self, item) -> bool:
+         """Support the `in` operator (must return a bool)."""
+         try:
+             # explicitly convert Redis's 0/1 reply to bool
+             return bool(self.redis.sismember(self.redis_key, item))
+         except redis.RedisError as e:
+             self.logger.error(f"Redis query failed: {str(e)}")
+             raise
+
+     def close(self) -> None:
+         """Synchronous cleanup (note: distinct from the async closed())."""
+         if not self.save_fp:
+             try:
+                 count = self.redis.delete(self.redis_key)
+                 self.logger.info(f"Cleared Redis key {self.redis_key}, deleted: {count}")
+             except redis.RedisError as e:
+                 self.logger.error(f"Cleanup failed: {str(e)}")
+             finally:
+                 # the synchronous client must close its connection pool explicitly
+                 self.redis.close()
+
+     async def closed(self):
+         """Synchronous implementation behind the async-compatible interface."""
+         self.close()
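For reference, the same membership-then-add flow written directly against redis-py (a sketch that assumes a Redis server on localhost; the key mirrors the PROJECT_NAME:REDIS_KEY layout used above). SADD's return value would also let the check and the insert collapse into a single round trip:

    import redis

    client = redis.from_url("redis://localhost:6379", decode_responses=True)
    key = "my_project:request_fingerprints"  # illustrative PROJECT_NAME:REDIS_KEY value

    def requested(fp: str) -> bool:
        # SADD returns 1 when the member is new and 0 when it already existed,
        # so one call covers what sismember + sadd do in RedisFilter.requested.
        return client.sadd(key, fp) == 0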
crawlo/items/__init__.py CHANGED
@@ -1,10 +1,48 @@
  #!/usr/bin/python
  # -*- coding:UTF-8 -*-
  from abc import ABCMeta
+ from typing import Any, Optional, Type


- class Field(dict):
-     pass
+ class Field:
+     def __init__(
+         self,
+         nullable: bool = True,
+         *,
+         default: Any = None,
+         field_type: Optional[Type] = None,
+         max_length: Optional[int] = None,
+         description: str = ""
+     ):
+         self.nullable = nullable
+         self.default = default
+         self.field_type = field_type
+         self.max_length = max_length
+         self.description = description
+
+     def validate(self, value: Any, field_name: str = "") -> Any:
+         if value is None or (isinstance(value, str) and value.strip() == ""):
+             if self.default is not None:
+                 return self.default
+             elif not self.nullable:
+                 raise ValueError(
+                     f"Field '{field_name}' may not be empty."
+                 )
+
+         if value is not None and not (isinstance(value, str) and value.strip() == ""):
+             if self.field_type and not isinstance(value, self.field_type):
+                 raise TypeError(
+                     f"Field '{field_name}' has the wrong type: expected {self.field_type}, got {type(value)}, value: {value!r}"
+                 )
+             if self.max_length and len(str(value)) > self.max_length:
+                 raise ValueError(
+                     f"Field '{field_name}' exceeds max_length: limit {self.max_length}, actual {len(str(value))}, value: {value!r}"
+                 )
+
+         return value
+
+     def __repr__(self):
+         return f"<Field required={self.nullable} type={self.field_type} default={self.default}>"


  class ItemMeta(ABCMeta):
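A short sketch of how the reworked Field behaves on its own (assumes crawlo 1.0.2 is importable; the field definitions are invented for the example):

    from crawlo.items import Field

    price = Field(nullable=False, field_type=float, description="unit price")
    name = Field(default="unknown", field_type=str, max_length=10)

    print(name.validate("", field_name="name"))      # empty string falls back to the default: "unknown"
    print(price.validate(9.99, field_name="price"))  # passes the type check: 9.99
    price.validate(None, field_name="price")         # raises ValueError: field may not be empty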
crawlo/items/items.py CHANGED
@@ -21,13 +21,44 @@ class Item(MutableMapping, metaclass=ItemMeta):

          self._values: Dict[str, Any] = {}

+         # Initialize fields, filling in declared defaults
+         for field_name, field_obj in self.FIELDS.items():
+             if field_obj.default is not None:
+                 self._values[field_name] = field_obj.default
+
+         # Override defaults or set new values from kwargs
+         for key, value in kwargs.items():
+             self[key] = value
+
      def __getitem__(self, item: str) -> Any:
          return self._values[item]

+     # def __setitem__(self, key: str, value: Any) -> None:
+     #     if key not in self.FIELDS:
+     #         raise KeyError(f"{self.__class__.__name__} does not contain field: {key}")
+     #     self._values[key] = value
+
      def __setitem__(self, key: str, value: Any) -> None:
          if key not in self.FIELDS:
              raise KeyError(f"{self.__class__.__name__} does not contain field: {key}")
-         self._values[key] = value
+
+         field = self.FIELDS[key]
+         try:
+             validated_value = field.validate(value, field_name=key)
+             self._values[key] = validated_value
+         except Exception as e:
+             error_lines = [
+                 "",
+                 "[Field validation failed]",
+                 f"Field name: {key}",
+                 f"Value type: {type(value)}",
+                 f"Original value: {repr(value)}",
+                 f"Nullable: {field.nullable}",
+                 f"Reason: {str(e)}",
+                 ""
+             ]
+             detailed_error = "\n".join(error_lines)
+             raise type(e)(detailed_error) from e

      def __delitem__(self, key: str) -> None:
          del self._values[key]
@@ -78,11 +109,11 @@ class Item(MutableMapping, metaclass=ItemMeta):

  if __name__ == '__main__':
      class TestItem(Item):
-         url = Field()
-         title = Field()
+         url = Field(nullable=False, field_type=str, max_length=100)
+         title = Field(default="无标题", field_type=str)

      test_item = TestItem()
      test_item['title'] = '百度首页'
-     test_item['url'] = 'http://example.com'
+     test_item['url'] = 'hhh'
      # test_item.title = 'fffff'
-     print(test_item.title)
+     print(test_item)
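Through Item.__setitem__ these checks now run on every assignment, so a bad value fails loudly instead of being stored. A sketch of what that looks like (assumes crawlo 1.0.2; the item class and values are invented):

    from crawlo.items import Field
    from crawlo.items.items import Item

    class ProductItem(Item):
        url = Field(nullable=False, field_type=str, max_length=100)
        title = Field(default="untitled", field_type=str)

    item = ProductItem()
    item['title'] = 'Example page'  # str: accepted
    item['url'] = 12345             # not a str: re-raised as TypeError carrying the
                                    # multi-line "[Field validation failed]" report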
crawlo/middleware/retry.py CHANGED
@@ -36,12 +36,14 @@ class RetryMiddleware(object):
          ignore_http_codes: List,
          max_retry_times: int,
          retry_exceptions: List,
-         stats: StatsCollector
+         stats: StatsCollector,
+         retry_priority: int
      ):
          self.retry_http_codes = retry_http_codes
          self.ignore_http_codes = ignore_http_codes
          self.max_retry_times = max_retry_times
          self.retry_exceptions = tuple(retry_exceptions + _retry_exceptions)
+         self.retry_priority = retry_priority
          self.stats = stats
          self.logger = get_logger(self.__class__.__name__)

@@ -52,7 +54,8 @@ class RetryMiddleware(object):
              ignore_http_codes=crawler.settings.get_list('IGNORE_HTTP_CODES'),
              max_retry_times=crawler.settings.get_int('MAX_RETRY_TIMES'),
              retry_exceptions=crawler.settings.get_list('RETRY_EXCEPTIONS'),
-             stats=crawler.stats
+             stats=crawler.stats,
+             retry_priority=crawler.settings.get_int('RETRY_PRIORITY')
          )
          return o

@@ -77,6 +80,9 @@ class RetryMiddleware(object):
              retry_times += 1
              self.logger.info(f"{spider} {request} {reason} retrying {retry_times} time...")
              request.meta['retry_times'] = retry_times
+             # request.dont_retry = True
+             request.meta['dont_retry'] = True
+             request.retry_priority = request.priority + self.retry_priority
              self.stats.inc_value("retry_count")
              return request
          else:
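The new retry_priority value is read from a RETRY_PRIORITY setting, so adjusting how retries are re-queued becomes a one-line configuration change. A sketch (the value is an illustrative assumption; default_settings.py in this release defines the shipped default):

    # settings.py (sketch)
    RETRY_PRIORITY = -1  # the middleware sets request.retry_priority = request.priority + RETRY_PRIORITY,
                         # so a negative offset lowers the numeric priority of retried requests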