crawlo-1.0.0-py3-none-any.whl → crawlo-1.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__init__.py +1 -0
- crawlo/__version__.py +1 -1
- crawlo/core/engine.py +9 -7
- crawlo/core/processor.py +1 -1
- crawlo/core/scheduler.py +32 -8
- crawlo/downloader/playwright_downloader.py +161 -0
- crawlo/extension/log_stats.py +4 -4
- crawlo/filters/__init__.py +37 -0
- crawlo/filters/aioredis_filter.py +130 -0
- crawlo/filters/memory_filter.py +203 -0
- crawlo/filters/redis_filter.py +120 -0
- crawlo/items/__init__.py +40 -2
- crawlo/items/items.py +36 -5
- crawlo/middleware/retry.py +7 -2
- crawlo/network/request.py +121 -18
- crawlo/pipelines/console_pipeline.py +28 -8
- crawlo/pipelines/mongo_pipeline.py +114 -2
- crawlo/pipelines/mysql_batch_pipline.py +134 -0
- crawlo/pipelines/mysql_pipeline.py +173 -2
- crawlo/pipelines/pipeline_manager.py +3 -3
- crawlo/settings/default_settings.py +51 -1
- crawlo/spider/__init__.py +2 -2
- crawlo/utils/date_tools.py +165 -8
- crawlo/utils/func_tools.py +74 -14
- crawlo/utils/pqueue.py +166 -8
- crawlo/utils/project.py +3 -2
- crawlo/utils/request.py +85 -0
- crawlo/utils/url.py +40 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/METADATA +2 -2
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/RECORD +34 -26
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/WHEEL +0 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/entry_points.txt +0 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/licenses/LICENSE +0 -0
- {crawlo-1.0.0.dist-info → crawlo-1.0.1.dist-info}/top_level.txt +0 -0
crawlo/__init__.py
CHANGED
crawlo/__version__.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "1.0.0"
+__version__ = "1.0.1"

crawlo/core/engine.py
CHANGED
@@ -1,20 +1,20 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
 import asyncio
+from inspect import iscoroutine
 from typing import Optional, Generator, Callable
-from inspect import iscoroutine, isgenerator, isasyncgen
 
 from crawlo import Request, Item
 from crawlo.spider import Spider
+from crawlo.utils.log import get_logger
+from crawlo.exceptions import OutputError
 from crawlo.core.scheduler import Scheduler
 from crawlo.core.processor import Processor
-from crawlo.utils.log import get_logger
 from crawlo.task_manager import TaskManager
 from crawlo.utils.project import load_class
 from crawlo.downloader import DownloaderBase
 from crawlo.utils.func_tools import transform
-from crawlo.
-from crawlo.event import spider_opened, spider_error
+from crawlo.event import spider_opened, spider_error, request_scheduled
 
 
 class Engine(object):
@@ -49,7 +49,7 @@ class Engine(object):
     async def start_spider(self, spider):
         self.spider = spider
 
-        self.scheduler = Scheduler(self.crawler)
+        self.scheduler = Scheduler.create_instance(self.crawler)
         if hasattr(self.scheduler, 'open'):
             self.scheduler.open()
 
@@ -115,7 +115,7 @@
             if iscoroutine(_outputs):
                 await _outputs
             else:
-                return transform(_outputs)
+                return transform(_outputs, _response)
 
         _response = await self.downloader.fetch(request)
         if _response is None:
@@ -128,7 +128,8 @@
 
     async def _schedule_request(self, request):
         # TODO deduplication
-        await self.scheduler.enqueue_request(request)
+        if await self.scheduler.enqueue_request(request):
+            asyncio.create_task(self.crawler.subscriber.notify(request_scheduled, request, self.crawler.spider))
 
     async def _get_next_request(self):
         return await self.scheduler.next_request()
@@ -152,6 +153,7 @@
 
     async def close_spider(self):
         await asyncio.gather(*self.task_manager.current_task)
+        await self.scheduler.close()
         await self.downloader.close()
         if self.normal:
             await self.crawler.close()

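In 1.0.1 the engine only publishes a `request_scheduled` event when `enqueue_request` actually accepts the request, and it does so as a fire-and-forget `asyncio.create_task` so notification never blocks the crawl loop. Below is a minimal, self-contained sketch of that pattern using stub classes; it is not crawlo's real `Scheduler`/subscriber API.

```python
import asyncio


class StubSubscriber:
    """Hypothetical stand-in for crawler.subscriber; only the notify() call is mirrored."""
    async def notify(self, event_name, request):
        await asyncio.sleep(0)                      # pretend to fan out to listeners
        print(f"{event_name}: {request}")


class StubScheduler:
    """Hypothetical scheduler whose enqueue_request() returns False for duplicates."""
    def __init__(self):
        self.seen = set()

    async def enqueue_request(self, request) -> bool:
        if request in self.seen:                    # duplicate -> rejected, not queued
            return False
        self.seen.add(request)
        return True


async def schedule(scheduler, subscriber, request):
    # Mirrors the new Engine._schedule_request: notify only when the request
    # was really queued, and run the notification in the background.
    if await scheduler.enqueue_request(request):
        asyncio.create_task(subscriber.notify("request_scheduled", request))


async def main():
    scheduler, subscriber = StubScheduler(), StubSubscriber()
    for url in ("https://example.com", "https://example.com", "https://example.org"):
        await schedule(scheduler, subscriber, url)
    await asyncio.sleep(0.1)                        # let the background notifications finish

asyncio.run(main())
```
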
crawlo/core/processor.py
CHANGED
@@ -15,7 +15,7 @@ class Processor(object):
         self.pipelines: Optional[PipelineManager] = None
 
     def open(self):
-        self.pipelines = PipelineManager.
+        self.pipelines = PipelineManager.from_crawler(self.crawler)
 
     async def process(self):
         while not self.idle():

crawlo/core/scheduler.py
CHANGED
@@ -1,35 +1,59 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
-import
-from typing import Optional
+from typing import Optional, Callable
 
 from crawlo.utils.log import get_logger
-from crawlo.
+from crawlo.utils.request import set_request
 from crawlo.utils.pqueue import SpiderPriorityQueue
+from crawlo.utils.project import load_class, common_call
 
 
 class Scheduler:
-    def __init__(self, crawler):
+    def __init__(self, crawler, dupe_filter, stats, log_level, priority):
         self.crawler = crawler
         self.request_queue: Optional[SpiderPriorityQueue] = None
 
-        self.item_count = 0
-        self.response_count = 0
-        self.logger = get_logger(name=self.__class__.__name__, level=
+        # self.item_count = 0
+        # self.response_count = 0
+        self.logger = get_logger(name=self.__class__.__name__, level=log_level)
+        self.stats = stats
+        self.dupe_filter = dupe_filter
+        self.priority = priority
+
+    @classmethod
+    def create_instance(cls, crawler):
+        filter_cls = load_class(crawler.settings.get('FILTER_CLASS'))
+        o = cls(
+            crawler=crawler,
+            dupe_filter=filter_cls.create_instance(crawler),
+            stats=crawler.stats,
+            log_level=crawler.settings.get('LOG_LEVEL'),
+            priority=crawler.settings.get('DEPTH_PRIORITY')
+        )
+        return o
 
     def open(self):
         self.request_queue = SpiderPriorityQueue()
+        self.logger.info(f'requesting filter: {self.dupe_filter}')
 
     async def next_request(self):
        request = await self.request_queue.get()
        return request
 
     async def enqueue_request(self, request):
+        if not request.dont_filter and await common_call(self.dupe_filter.requested, request):
+            self.dupe_filter.log_stats(request)
+            return False
+        set_request(request, self.priority)
         await self.request_queue.put(request)
-
+        return True
 
     def idle(self) -> bool:
         return len(self) == 0
 
+    async def close(self):
+        if isinstance(closed := getattr(self.dupe_filter, 'closed', None), Callable):
+            await closed()
+
     def __len__(self):
         return self.request_queue.qsize()

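The scheduler is now built through `Scheduler.create_instance`, which loads the dedup filter class named by the `FILTER_CLASS` setting and skips filtering for requests whose `dont_filter` flag is set. A hedged configuration sketch follows, assuming a standard crawlo `settings.py` of uppercase constants; the dotted path points at the Redis filter added in this release, while the shipped default lives in `crawlo/settings/default_settings.py` and is not shown in this diff.

```python
# settings.py (sketch). FILTER_CLASS, LOG_LEVEL and DEPTH_PRIORITY are the keys
# read by Scheduler.create_instance above; the values here are illustrative.
FILTER_CLASS = 'crawlo.filters.aioredis_filter.AioRedisFilter'
LOG_LEVEL = 'INFO'
DEPTH_PRIORITY = 1   # handed to set_request() when a request is queued
```

Requests whose `dont_filter` attribute is truthy bypass the filter entirely and are always queued.
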
crawlo/downloader/playwright_downloader.py
ADDED
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+from typing import Optional, Dict, Any
+from playwright.async_api import Browser, Page, Response as PlaywrightResponse
+from crawlo import Response, Request
+from crawlo.downloader import DownloaderBase
+
+
+class PlaywrightDownloader(DownloaderBase):
+    def __init__(self, crawler):
+        super().__init__(crawler)
+        # Core Playwright objects
+        self.browser: Optional[Browser] = None  # browser instance
+        self.context: Optional[Any] = None  # browser context (isolates cookies, etc.)
+
+        # Configurable options (overridable via crawler.settings)
+        self._browser_type: str = "chromium"  # browser type (chromium/firefox/webkit)
+        self._headless: bool = True  # headless mode
+        self._timeout: int = 30000  # operation timeout (ms)
+        self._viewport: Dict[str, int] = {"width": 1280, "height": 720}  # viewport size
+        self._extra_launch_args: Dict[str, Any] = {}  # extra browser launch arguments
+
+    async def _init_browser(self):
+        """Initialize the Playwright browser instance."""
+        from playwright.async_api import async_playwright
+
+        # Start the Playwright engine
+        playwright = await async_playwright().start()
+
+        # Choose the browser type from configuration
+        browser_launcher = {
+            "chromium": playwright.chromium,
+            "firefox": playwright.firefox,
+            "webkit": playwright.webkit
+        }.get(self._browser_type, playwright.chromium)  # default: chromium
+
+        # Launch the browser (with launch arguments)
+        self.browser = await browser_launcher.launch(
+            headless=self._headless,  # headless switch
+            timeout=self._timeout,  # launch timeout
+            **self._extra_launch_args  # pass through extra options (e.g. proxy config)
+        )
+
+        # Create a browser context (isolated environment)
+        self.context = await self.browser.new_context(
+            viewport=self._viewport,  # window size
+            user_agent=self.crawler.settings.get("USER_AGENT")  # custom UA
+        )
+
+    def open(self):
+        """Load options from crawler settings."""
+        super().open()  # parent-class initialization
+
+        # Read configuration (can be overridden in settings.py)
+        self._browser_type = self.crawler.settings.get("PLAYWRIGHT_BROWSER", "chromium")
+        self._headless = self.crawler.settings.get_bool("HEADLESS", True)
+        self._timeout = self.crawler.settings.get_int("PLAYWRIGHT_TIMEOUT", 30000)
+        self._viewport = self.crawler.settings.get_dict("VIEWPORT", {"width": 1280, "height": 720})
+        self._extra_launch_args = self.crawler.settings.get_dict("PLAYWRIGHT_LAUNCH_ARGS", {})
+
+    async def download(self, request: Request) -> Response:
+        """
+        Core download method:
+        1. Open a new page (tab)
+        2. Navigate to the target URL
+        3. Return the rendered content
+        """
+        if not self.browser:
+            await self._init_browser()  # lazily create the browser
+
+        page = await self.context.new_page()  # one page per request (automatic isolation)
+
+        try:
+            # Set request headers (to mimic a browser)
+            if request.headers:
+                await page.set_extra_http_headers(request.headers)
+
+            # Navigate to the target URL (wait strategy is configurable)
+            response = await page.goto(
+                request.url,
+                timeout=self._timeout,
+                wait_until="domcontentloaded"  # wait strategy: domcontentloaded/networkidle/load
+            )
+
+            # Special handling for POST requests (Playwright requires the fetch API here)
+            if request.method.lower() == "post":
+                return await self._handle_post_request(request, page)
+
+            # Run custom JavaScript (to extract dynamic data)
+            if request.meta.get("execute_js"):
+                result = await page.evaluate(request.meta["execute_js"])
+                request.meta["js_result"] = result  # store the JS result
+
+            # Get the fully rendered HTML (including dynamically generated content)
+            body = await page.content()
+
+            # Take a screenshot in debug mode (for troubleshooting pages)
+            if self.crawler.settings.get_bool("DEBUG"):
+                screenshot = await page.screenshot(type="png")
+                request.meta["screenshot"] = screenshot  # store the screenshot in request.meta
+
+            # Build the unified response object
+            return self._structure_response(request, response, body)
+
+        except Exception as e:
+            self.logger.error(f"Page download failed: {str(e)}")
+            raise
+        finally:
+            await page.close()  # always close the page to avoid resource leaks
+
+    async def _handle_post_request(self, request: Request, page: Page) -> Response:
+        """
+        Handle POST requests:
+        send the POST through the in-page fetch API and wait for the response.
+        """
+        async with page.expect_response(request.url) as response_info:
+            # Run fetch in the page context
+            await page.evaluate(
+                """async ({url, headers, body}) => {
+                    await fetch(url, {
+                        method: 'POST',
+                        headers: headers,
+                        body: body
+                    });
+                }""",
+                {
+                    "url": request.url,
+                    "headers": request.headers or {},
+                    "body": request.body or ""
+                }
+            )
+
+        response = await response_info.value  # the API response
+        body = await response.text()  # read the response body
+        return self._structure_response(request, response, body)
+
+    @staticmethod
+    def _structure_response(
+            request: Request,
+            response: PlaywrightResponse,
+            body: str
+    ) -> Response:
+        """
+        Normalize the response:
+        convert the Playwright response into crawlo's unified Response object.
+        """
+        return Response(
+            url=str(response.url),  # final URL (after redirects)
+            headers=response.headers,  # response headers
+            status_code=response.status,  # HTTP status code
+            body=body.encode('utf-8'),  # response body (as bytes)
+            request=request  # associated request object
+        )
+
+    async def close(self) -> None:
+        """Clean up: close the browser context and instance."""
+        if self.context:
+            await self.context.close()
+        if self.browser:
+            await self.browser.close()
+        await super().close()  # parent-class cleanup

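The new `PlaywrightDownloader` reads all of its options from settings in `open()` and looks at `request.meta` for per-request behaviour. A hedged configuration sketch follows: the `PLAYWRIGHT_*`, `HEADLESS`, `VIEWPORT`, `USER_AGENT` and `DEBUG` keys are the ones read above, while using a `DOWNLOADER` key to select the downloader class (and the `Request(..., meta=...)` signature in the comment) is an assumption about crawlo's configuration, not something shown in this diff.

```python
# settings.py (sketch)
DOWNLOADER = 'crawlo.downloader.playwright_downloader.PlaywrightDownloader'  # assumed selection key
PLAYWRIGHT_BROWSER = 'firefox'               # chromium / firefox / webkit
HEADLESS = True
PLAYWRIGHT_TIMEOUT = 30000                   # launch/navigation timeout in ms
VIEWPORT = {'width': 1366, 'height': 768}
PLAYWRIGHT_LAUNCH_ARGS = {'proxy': {'server': 'http://127.0.0.1:8080'}}  # passed through to launch()
USER_AGENT = 'Mozilla/5.0 (compatible; crawlo)'
DEBUG = False                                # True stores a PNG screenshot in request.meta['screenshot']

# Per-request JavaScript goes through meta; the result lands in
# request.meta['js_result'] after rendering (assumed Request signature):
# yield Request(url, meta={'execute_js': 'document.title'}, callback=self.parse)
```
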
crawlo/extension/log_stats.py
CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
 from crawlo import event
-from crawlo.utils.date_tools import
+from crawlo.utils.date_tools import get_current_time, time_diff_seconds
 
 
 class LogStats(object):
@@ -22,11 +22,11 @@ class LogStats(object):
         return o
 
     async def spider_opened(self):
-        self._stats['start_time'] =
+        self._stats['start_time'] = get_current_time(fmt='%Y-%m-%d %H:%M:%S')
 
     async def spider_closed(self):
-        self._stats['end_time'] =
-        self._stats['cost_time(s)'] =
+        self._stats['end_time'] = get_current_time(fmt='%Y-%m-%d %H:%M:%S')
+        self._stats['cost_time(s)'] = time_diff_seconds(start_time=self._stats['start_time'], end_time=self._stats['end_time'])
 
     async def item_successful(self, _item, _spider):
         self._stats.inc_value('item_successful_count')

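LogStats now takes its timestamps from `get_current_time` and its run duration from `time_diff_seconds` in `crawlo.utils.date_tools` (expanded by +165 -8 in this release, not shown here). The following is a minimal sketch of what equivalent helpers could look like for the `'%Y-%m-%d %H:%M:%S'` format used above; it is an illustration, not crawlo's actual implementation.

```python
from datetime import datetime

FMT = '%Y-%m-%d %H:%M:%S'


def get_current_time(fmt: str = FMT) -> str:
    # Current local time rendered with the format LogStats passes in.
    return datetime.now().strftime(fmt)


def time_diff_seconds(start_time: str, end_time: str, fmt: str = FMT) -> float:
    # Seconds elapsed between two formatted timestamps.
    delta = datetime.strptime(end_time, fmt) - datetime.strptime(start_time, fmt)
    return delta.total_seconds()


start = get_current_time()
end = get_current_time()
print(time_diff_seconds(start_time=start, end_time=end))   # 0.0 for back-to-back calls
```
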
crawlo/filters/__init__.py
ADDED
@@ -0,0 +1,37 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+from abc import ABC, abstractmethod
+
+from crawlo import Request
+from crawlo.utils.request import request_fingerprint
+
+
+class BaseFilter(ABC):
+
+    def __init__(self, logger, stats, debug: bool):
+        self.logger = logger
+        self.stats = stats
+        self.debug = debug
+
+    @classmethod
+    def create_instance(cls, *args, **kwargs) -> 'BaseFilter':
+        return cls(*args, **kwargs)
+
+    def requested(self, request: Request):
+        fp = request_fingerprint(request)
+        if fp in self:
+            return True
+        self.add_fingerprint(fp)
+        return False
+
+    @abstractmethod
+    def add_fingerprint(self, fp) -> None:
+        pass
+
+    def log_stats(self, request: Request) -> None:
+        if self.debug:
+            self.logger.debug(f'Filtered duplicate request: {request}')
+        self.stats.inc_value(f'{self}/filtered_count')
+
+    def __str__(self) -> str:
+        return f'{self.__class__.__name__}'

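`BaseFilter.requested` evaluates `fp in self` before calling `add_fingerprint`, so a concrete filter has to supply `__contains__` as well as `add_fingerprint`. Below is a minimal sketch of an in-memory subclass; the package ships its own `crawlo/filters/memory_filter.py` (+203 lines in this release), and this toy class is not that implementation.

```python
from crawlo.filters import BaseFilter


class SetFilter(BaseFilter):
    """Toy in-memory filter: keeps request fingerprints in a Python set."""

    def __init__(self, logger, stats, debug: bool):
        super().__init__(logger, stats, debug)
        self.fingerprints = set()

    def __contains__(self, fp) -> bool:
        # BaseFilter.requested() evaluates `fp in self`, which lands here.
        return fp in self.fingerprints

    def add_fingerprint(self, fp) -> None:
        self.fingerprints.add(fp)
```
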
crawlo/filters/aioredis_filter.py
ADDED
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+from typing import Optional
+
+import aioredis
+
+from crawlo import Request
+from crawlo.filters import BaseFilter
+from crawlo.utils.log import get_logger
+from crawlo.utils.request import request_fingerprint
+
+
+class AioRedisFilter(BaseFilter):
+    """Asynchronous request dedup filter backed by a Redis set (for distributed crawlers)."""
+
+    def __init__(
+            self,
+            redis_key: str,
+            client: aioredis.Redis,
+            stats: dict,
+            debug: bool,
+            log_level: str,
+            cleanup_fp: bool = False
+    ):
+        """
+        Initialize the filter.
+
+        Parameters:
+            redis_key: name of the Redis key that stores the fingerprints
+            client: aioredis client instance
+            stats: statistics dictionary
+            debug: whether debug mode is enabled
+            log_level: log level
+            save_fp: whether to keep the fingerprint data when the spider closes
+        """
+        # Initialize the logger (class name as the log identifier)
+        self.logger = get_logger(self.__class__.__name__, log_level)
+        super().__init__(self.logger, stats, debug)
+
+        self.redis_key = redis_key  # Redis key (e.g. "project:request_fingerprints")
+        self.redis = client  # async Redis client
+        self.cleanup_fp = cleanup_fp  # whether to persist the fingerprint data
+
+    @classmethod
+    def create_instance(cls, crawler) -> 'BaseFilter':
+        """Create a filter instance from crawler settings (factory method)."""
+        # Read Redis connection settings (with defaults)
+        redis_url = crawler.settings.get('REDIS_URL', 'redis://localhost:6379')
+        decode_responses = crawler.settings.get_bool('DECODE_RESPONSES', True)
+
+        try:
+            # Create the Redis connection pool (max 20 connections)
+            redis_client = aioredis.from_url(
+                redis_url,
+                decode_responses=decode_responses,
+                max_connections=20
+            )
+        except Exception as e:
+            raise RuntimeError(f"Redis connection failed {redis_url}: {str(e)}")
+
+        # Combine the project name and the configured key into the Redis key
+        return cls(
+            redis_key=f"{crawler.settings.get('PROJECT_NAME')}:{crawler.settings.get('REDIS_KEY', 'request_fingerprints')}",
+            client=redis_client,
+            stats=crawler.stats,
+            cleanup_fp=crawler.settings.get_bool('CLEANUP_FP', False),
+            debug=crawler.settings.get_bool('FILTER_DEBUG', False),
+            log_level=crawler.settings.get('LOG_LEVEL', 'INFO')
+        )
+
+    async def requested(self, request: Request) -> bool:
+        """
+        Check whether a request is a duplicate.
+
+        Parameters:
+            request: the request to check
+
+        Returns:
+            bool: True for a duplicate request, False for a new one
+        """
+        fp = request_fingerprint(request)  # generate the request fingerprint
+        try:
+            # Check whether the fingerprint already exists in the set
+            is_duplicate = await self.redis.sismember(self.redis_key, fp)
+            if is_duplicate:
+                # self.logger.debug(f"Duplicate request found: {fp}")
+                return True
+
+            # New request: add its fingerprint
+            await self.add_fingerprint(fp)
+            return False
+        except aioredis.RedisError as e:
+            self.logger.error(f"Redis operation failed: {str(e)}")
+            raise  # propagate the exception
+
+    async def add_fingerprint(self, fp: str) -> None:
+        """Add a new fingerprint to the Redis set."""
+        try:
+            await self.redis.sadd(self.redis_key, fp)
+            self.logger.debug(f"Added fingerprint: {fp}")
+        except aioredis.RedisError as e:
+            self.logger.error(f"Failed to add fingerprint: {str(e)}")
+            raise
+
+    async def closed(self, reason: Optional[str] = None) -> None:
+        """
+        Handle spider shutdown (compatible with Scrapy's close logic).
+
+        Parameters:
+            reason: reason the spider closed (standard Scrapy argument)
+        """
+        if self.cleanup_fp:  # only clean up when explicitly configured
+            try:
+                deleted = await self.redis.delete(self.redis_key)
+                self.logger.info(
+                    f"Cleaned {deleted} fingerprints from {self.redis_key} "
+                    f"(reason: {reason or 'manual'})"
+                )
+            except aioredis.RedisError as e:
+                self.logger.warning(f"Cleanup failed: {e}")
+            finally:
+                await self._close_redis()
+
+    async def _close_redis(self) -> None:
+        """Safely close the Redis connection."""
+        try:
+            await self.redis.close()
+            await self.redis.connection_pool.disconnect()
+        except Exception as e:
+            self.logger.warning(f"Redis close error: {e}")

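Everything the Redis filter needs comes from settings read in `create_instance` above. A hedged sketch of the relevant `settings.py` entries follows; `FILTER_CLASS` is consumed by the scheduler (see the scheduler diff), the remaining keys by `AioRedisFilter.create_instance`, and the concrete values are only examples.

```python
# settings.py (sketch)
FILTER_CLASS = 'crawlo.filters.aioredis_filter.AioRedisFilter'
PROJECT_NAME = 'my_project'        # example prefix; fingerprints land in "my_project:request_fingerprints"
REDIS_URL = 'redis://localhost:6379'
REDIS_KEY = 'request_fingerprints'
DECODE_RESPONSES = True
CLEANUP_FP = False                 # True deletes the fingerprint set when the spider closes
FILTER_DEBUG = False               # True logs every filtered duplicate request
LOG_LEVEL = 'INFO'
```
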