crawlo 1.1.2__py3-none-any.whl → 1.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo has been flagged as possibly problematic by the registry's scanner.
- crawlo/__version__.py +1 -1
- crawlo/core/scheduler.py +20 -16
- crawlo/downloader/httpx_downloader.py +14 -12
- crawlo/exceptions.py +4 -0
- crawlo/extension/__init__.py +17 -10
- crawlo/extension/health_check.py +142 -0
- crawlo/extension/log_interval.py +27 -18
- crawlo/extension/log_stats.py +62 -24
- crawlo/extension/logging_extension.py +18 -9
- crawlo/extension/memory_monitor.py +89 -0
- crawlo/extension/performance_profiler.py +118 -0
- crawlo/extension/request_recorder.py +108 -0
- crawlo/filters/aioredis_filter.py +2 -2
- crawlo/middleware/retry.py +3 -3
- crawlo/network/request.py +2 -2
- crawlo/network/response.py +25 -23
- crawlo/pipelines/__init__.py +9 -0
- crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
- crawlo/pipelines/database_dedup_pipeline.py +225 -0
- crawlo/pipelines/memory_dedup_pipeline.py +116 -0
- crawlo/pipelines/mongo_pipeline.py +81 -66
- crawlo/pipelines/mysql_pipeline.py +165 -43
- crawlo/pipelines/redis_dedup_pipeline.py +163 -0
- crawlo/queue/queue_manager.py +4 -0
- crawlo/queue/redis_priority_queue.py +20 -3
- crawlo/settings/default_settings.py +119 -66
- crawlo/subscriber.py +62 -37
- crawlo/templates/project/items.py.tmpl +1 -1
- crawlo/templates/project/middlewares.py.tmpl +73 -49
- crawlo/templates/project/pipelines.py.tmpl +52 -290
- crawlo/templates/project/run.py.tmpl +20 -7
- crawlo/templates/project/settings.py.tmpl +35 -3
- crawlo/templates/spider/spider.py.tmpl +1 -37
- crawlo/utils/controlled_spider_mixin.py +109 -5
- crawlo-1.1.4.dist-info/METADATA +403 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/RECORD +40 -31
- examples/controlled_spider_example.py +205 -0
- crawlo-1.1.2.dist-info/METADATA +0 -567
- {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/WHEEL +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/top_level.txt +0 -0
crawlo/__version__.py
CHANGED
@@ -1 +1 @@
-__version__ = "1.1.2"
+__version__ = "1.1.4"
crawlo/core/scheduler.py
CHANGED
@@ -34,21 +34,25 @@ class Scheduler:
 
     async def open(self):
         """初始化调度器和队列"""
-        ... (15 removed lines not shown in this view)
+        try:
+            # 创建队列配置
+            queue_config = QueueConfig.from_settings(self.crawler.settings)
+
+            # 创建队列管理器
+            self.queue_manager = QueueManager(queue_config)
+
+            # 初始化队列
+            success = await self.queue_manager.initialize()
+            if not success:
+                raise RuntimeError("队列初始化失败")
+
+            # 输出队列状态
+            status = self.queue_manager.get_status()
+            self.logger.info(f'队列类型: {status["type"]}, 状态: {status["health"]}')
+            self.logger.info(f'requesting filter: {self.dupe_filter}')
+        except Exception as e:
+            self.logger.error(f"❌ 调度器初始化失败: {e}")
+            raise
 
     async def next_request(self):
         """获取下一个请求"""
@@ -159,4 +163,4 @@ class Scheduler:
     # await closed()
     #
     # def __len__(self):
-    #     return self.request_queue.qsize()
+    #     return self.request_queue.qsize()
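For context, the new open() body centres on three calls that are visible in the hunk: QueueConfig.from_settings, QueueManager.initialize, and QueueManager.get_status. Below is a minimal sketch of that sequence with error handling trimmed; the import path is an assumption based on the crawlo/queue/queue_manager.py entry in the file list and may differ in the package.

# Minimal sketch of the initialization sequence shown in the hunk above.
# Assumption: QueueConfig and QueueManager live in crawlo.queue.queue_manager
# (not confirmed by this diff; only the call names are).
from crawlo.queue.queue_manager import QueueConfig, QueueManager

async def init_queue(settings, logger):
    queue_config = QueueConfig.from_settings(settings)   # build queue config from project settings
    queue_manager = QueueManager(queue_config)            # create the queue manager
    if not await queue_manager.initialize():              # set up the backing queue
        raise RuntimeError("queue initialization failed")
    status = queue_manager.get_status()                   # report queue type and health
    logger.info(f'queue type: {status["type"]}, health: {status["health"]}')
    return queue_manager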
crawlo/downloader/httpx_downloader.py
CHANGED
@@ -26,6 +26,7 @@ except ImportError:
 # 定义我们认为是网络问题,应该触发降级的异常
 NETWORK_EXCEPTIONS = (ConnectError, TimeoutException, NetworkError)
 
+
 class HttpXDownloader(DownloaderBase):
     """
     基于 httpx 的高性能异步下载器
@@ -66,15 +67,15 @@ class HttpXDownloader(DownloaderBase):
         self._client_timeout = Timeout(
             connect=10.0,  # 建立连接超时
             read=timeout_total - 10.0 if timeout_total > 10 else timeout_total / 2,  # 读取数据超时
-            write=10.0,
-            pool=1.0
+            write=10.0,  # 发送数据超时
+            pool=1.0  # 从连接池获取连接的超时
         )
         self._client_limits = Limits(
             max_connections=pool_limit,
             max_keepalive_connections=pool_per_host
         )
         self._client_verify = self.crawler.settings.get_bool("VERIFY_SSL", True)
-        self._client_http2 = True
+        self._client_http2 = True  # 启用 HTTP/2 支持
         # ----------------------------
 
         # 创建持久化客户端 (不在此处设置全局代理)
@@ -102,7 +103,7 @@ class HttpXDownloader(DownloaderBase):
         # --- 1. 确定要使用的 client 实例 ---
         effective_client = self._client  # 默认使用共享的主 client
         temp_client = None  # 用于可能创建的临时 client
-        used_proxy = None
+        used_proxy = None  # 记录当前尝试使用的代理
 
         try:
             # --- 2. 构造发送参数 (不包含 proxy/proxies) ---
@@ -123,7 +124,7 @@ class HttpXDownloader(DownloaderBase):
                 kwargs["content"] = request.body  # 使用 content 而不是 data
 
             # --- 3. 处理代理 ---
-            httpx_proxy_config = None
+            httpx_proxy_config = None  # 用于初始化临时 client 的代理配置
             if request.proxy:
                 # 根据 request.proxy 的类型准备 httpx 的 proxy 参数
                 if isinstance(request.proxy, str):
@@ -156,14 +157,15 @@ class HttpXDownloader(DownloaderBase):
                         limits=self._client_limits,
                         verify=self._client_verify,
                         http2=self._client_http2,
-                        follow_redirects=True,
-                        proxy=httpx_proxy_config,
+                        follow_redirects=True,  # 确保继承
+                        proxy=httpx_proxy_config,  # 设置代理
                     )
                     effective_client = temp_client
-                    used_proxy = httpx_proxy_config
+                    used_proxy = httpx_proxy_config  # 记录使用的代理
                     self.logger.debug(f"Using temporary client with proxy: {httpx_proxy_config} for {request.url}")
                 except Exception as e:
-                    self.logger.error(
+                    self.logger.error(
+                        f"Failed to create temporary client with proxy {httpx_proxy_config} for {request.url}: {e}")
                     # 出错则回退到使用主 client(无代理)
                     # 可以选择抛出异常或继续
                     # raise  # 如果希望代理失败导致请求失败,取消注释
@@ -181,7 +183,7 @@ class HttpXDownloader(DownloaderBase):
                 )
                 # 关闭失败的临时客户端
                 await temp_client.aclose()
-                temp_client = None
+                temp_client = None  # 防止 finally 再次关闭
 
             # 切换到主客户端(直连)
             effective_client = self._client
@@ -223,7 +225,7 @@ class HttpXDownloader(DownloaderBase):
             try:
                 error_body = await e.response.aread()
             except Exception:
-                error_body = b""
+                error_body = b""  # 如果读取错误响应体失败,则为空
             return self.structure_response(request=request, response=e.response, body=error_body)
         except Exception as e:
             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
@@ -244,7 +246,7 @@ class HttpXDownloader(DownloaderBase):
             return Response(
                 url=str(response.url),  # httpx 的 URL 是对象,需转字符串
                 headers=dict(response.headers),
-                status_code=response.status_code,
+                status_code=response.status_code,  # 注意:使用 status_code
                 body=body,
                 request=request
            )
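The proxy handling above boils down to: keep one shared AsyncClient for direct requests, spin up a temporary client when a request carries its own proxy, and fall back to the shared client if the temporary one cannot be created. A rough standalone sketch of that pattern follows, assuming an httpx version that accepts the proxy= keyword on AsyncClient (as the hunk does); the function and names here are illustrative, not the downloader's API.

import httpx
from typing import Optional

# Rough sketch of the per-request proxy strategy: shared client for direct
# requests, a short-lived client when a proxy is supplied, with fallback.
async def fetch(url: str, shared_client: httpx.AsyncClient, proxy: Optional[str] = None) -> httpx.Response:
    if proxy:
        try:
            async with httpx.AsyncClient(proxy=proxy, follow_redirects=True) as temp_client:
                return await temp_client.get(url)
        except Exception:
            pass  # fall back to the shared client (direct connection), as the downloader does
    return await shared_client.get(url)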
crawlo/exceptions.py
CHANGED
crawlo/extension/__init__.py
CHANGED
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
-from typing import List
+from typing import List, Any
 from pprint import pformat
 
 from crawlo.utils.log import get_logger
@@ -10,7 +10,7 @@ from crawlo.exceptions import ExtensionInitError
 
 class ExtensionManager(object):
 
-    def __init__(self, crawler):
+    def __init__(self, crawler: Any):
         self.crawler = crawler
         self.extensions: List = []
         extensions = self.crawler.settings.get_list('EXTENSIONS')
@@ -18,14 +18,21 @@ class ExtensionManager(object):
         self._add_extensions(extensions)
 
     @classmethod
-    def create_instance(cls, *args, **kwargs):
+    def create_instance(cls, *args: Any, **kwargs: Any) -> 'ExtensionManager':
         return cls(*args, **kwargs)
 
-    def _add_extensions(self, extensions):
-        for ... (remainder of the removed body not shown in this view)
+    def _add_extensions(self, extensions: List[str]) -> None:
+        for extension_path in extensions:
+            try:
+                extension_cls = load_class(extension_path)
+                if not hasattr(extension_cls, 'create_instance'):
+                    raise ExtensionInitError(
+                        f"Extension '{extension_path}' init failed: Must have method 'create_instance()'"
+                    )
+                self.extensions.append(extension_cls.create_instance(self.crawler))
+            except Exception as e:
+                self.logger.error(f"Failed to load extension '{extension_path}': {e}")
+                raise ExtensionInitError(f"Failed to load extension '{extension_path}': {e}")
+
         if extensions:
-            self.logger.info(f"
+            self.logger.info(f"Enabled extensions: \n{pformat(extensions)}")
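The reworked _add_extensions enforces a simple contract: every class listed in the EXTENSIONS setting must expose a create_instance() classmethod, otherwise ExtensionInitError is raised at load time. A minimal example of an extension that satisfies that contract; the class name and module path are hypothetical.

# Hypothetical extension that satisfies the contract enforced by _add_extensions:
# it must be importable by dotted path and expose a create_instance() classmethod.
class RequestCounterExtension:
    def __init__(self, crawler):
        self.crawler = crawler
        self.count = 0

    @classmethod
    def create_instance(cls, crawler):
        return cls(crawler)

# Registered via the EXTENSIONS setting (path is illustrative):
# EXTENSIONS = ["myproject.extensions.RequestCounterExtension"]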
crawlo/extension/health_check.py
ADDED
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+import asyncio
+from typing import Any, Optional, Dict
+from datetime import datetime, timedelta
+
+from crawlo.utils.log import get_logger
+from crawlo.event import spider_opened, spider_closed, response_received, request_scheduled
+
+
+class HealthCheckExtension:
+    """
+    健康检查扩展
+    监控爬虫的健康状态,包括响应时间、错误率等指标
+    """
+
+    def __init__(self, crawler: Any):
+        self.settings = crawler.settings
+        self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+
+        # 获取配置参数
+        self.enabled = self.settings.get_bool('HEALTH_CHECK_ENABLED', True)
+        self.check_interval = self.settings.get_int('HEALTH_CHECK_INTERVAL', 60)  # 默认60秒
+
+        # 健康状态统计
+        self.stats: Dict[str, Any] = {
+            'start_time': None,
+            'total_requests': 0,
+            'total_responses': 0,
+            'error_responses': 0,
+            'last_check_time': None,
+            'response_times': [],  # 存储最近的响应时间
+        }
+
+        self.task: Optional[asyncio.Task] = None
+
+    @classmethod
+    def create_instance(cls, crawler: Any) -> 'HealthCheckExtension':
+        # 只有当配置启用时才创建实例
+        if not crawler.settings.get_bool('HEALTH_CHECK_ENABLED', True):
+            from crawlo.exceptions import NotConfigured
+            raise NotConfigured("HealthCheckExtension: HEALTH_CHECK_ENABLED is False")
+
+        o = cls(crawler)
+        if o.enabled:
+            crawler.subscriber.subscribe(o.spider_opened, event=spider_opened)
+            crawler.subscriber.subscribe(o.spider_closed, event=spider_closed)
+            crawler.subscriber.subscribe(o.response_received, event=response_received)
+            crawler.subscriber.subscribe(o.request_scheduled, event=request_scheduled)
+        return o
+
+    async def spider_opened(self) -> None:
+        """爬虫启动时初始化健康检查"""
+        if not self.enabled:
+            return
+
+        self.stats['start_time'] = datetime.now()
+        self.task = asyncio.create_task(self._health_check_loop())
+        self.logger.info("Health check extension started.")
+
+    async def spider_closed(self) -> None:
+        """爬虫关闭时停止健康检查"""
+        if not self.enabled:
+            return
+
+        if self.task:
+            self.task.cancel()
+            try:
+                await self.task
+            except asyncio.CancelledError:
+                pass
+
+        # 输出最终健康状态
+        await self._check_health()
+        self.logger.info("Health check extension stopped.")
+
+    async def request_scheduled(self, request: Any, spider: Any) -> None:
+        """记录调度的请求"""
+        if not self.enabled:
+            return
+        self.stats['total_requests'] += 1
+
+    async def response_received(self, response: Any, spider: Any) -> None:
+        """记录接收到的响应"""
+        if not self.enabled:
+            return
+
+        self.stats['total_responses'] += 1
+
+        # 记录错误响应
+        if hasattr(response, 'status_code') and response.status_code >= 400:
+            self.stats['error_responses'] += 1
+
+    async def _health_check_loop(self) -> None:
+        """健康检查循环"""
+        while True:
+            try:
+                await asyncio.sleep(self.check_interval)
+                await self._check_health()
+            except asyncio.CancelledError:
+                break
+            except Exception as e:
+                self.logger.error(f"Error in health check loop: {e}")
+
+    async def _check_health(self) -> None:
+        """执行健康检查并输出报告"""
+        try:
+            now_time = datetime.now()
+            self.stats['last_check_time'] = now_time
+
+            # 计算基本统计信息
+            runtime = (now_time - self.stats['start_time']).total_seconds() if self.stats['start_time'] else 0
+            requests_per_second = self.stats['total_requests'] / runtime if runtime > 0 else 0
+            responses_per_second = self.stats['total_responses'] / runtime if runtime > 0 else 0
+
+            # 计算错误率
+            error_rate = (
+                self.stats['error_responses'] / self.stats['total_responses']
+                if self.stats['total_responses'] > 0 else 0
+            )
+
+            # 输出健康报告
+            health_report = {
+                'runtime_seconds': round(runtime, 2),
+                'total_requests': self.stats['total_requests'],
+                'total_responses': self.stats['total_responses'],
+                'requests_per_second': round(requests_per_second, 2),
+                'responses_per_second': round(responses_per_second, 2),
+                'error_responses': self.stats['error_responses'],
+                'error_rate': f"{error_rate:.2%}",
+            }
+
+            # 根据错误率判断健康状态
+            if error_rate > 0.1:  # 错误率超过10%
+                self.logger.warning(f"Health check report: {health_report}")
+            elif error_rate > 0.05:  # 错误率超过5%
+                self.logger.info(f"Health check report: {health_report}")
+            else:
+                self.logger.debug(f"Health check report: {health_report}")
+
+        except Exception as e:
+            self.logger.error(f"Error in health check: {e}")
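Based on the defaults in the new file, the health check is on by default (HEALTH_CHECK_ENABLED defaults to True) and reports every 60 seconds. A hypothetical settings snippet tuning it; the setting names and defaults come from the code above, but the EXTENSIONS entry and import path are assumptions, since this diff does not show how the extension is registered by default.

# Hypothetical project settings tuning the new health check
# (setting names and defaults taken from HealthCheckExtension above).
HEALTH_CHECK_ENABLED = True      # create_instance raises NotConfigured when False
HEALTH_CHECK_INTERVAL = 30       # seconds between health reports (code default: 60)

EXTENSIONS = [
    "crawlo.extension.health_check.HealthCheckExtension",  # assumed import path
]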
crawlo/extension/log_interval.py
CHANGED
@@ -1,6 +1,7 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
 import asyncio
+from typing import Any, Optional
 
 from crawlo.utils.log import get_logger
 from crawlo.event import spider_opened, spider_closed
@@ -8,12 +9,12 @@ from crawlo.event import spider_opened, spider_closed
 
 class LogIntervalExtension(object):
 
-    def __init__(self, crawler):
-        self.task = None
+    def __init__(self, crawler: Any):
+        self.task: Optional[asyncio.Task] = None
         self.stats = crawler.stats
         self.item_count = 0
         self.response_count = 0
-        self.seconds = crawler.settings.get('INTERVAL')
+        self.seconds = crawler.settings.get('INTERVAL', 60)  # 默认60秒
         self.interval = int(self.seconds / 60) if self.seconds % 60 == 0 else self.seconds
         self.interval = "" if self.interval == 1 else self.interval
         self.unit = 'min' if self.seconds % 60 == 0 else 's'
@@ -21,29 +22,37 @@ class LogIntervalExtension(object):
         self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
 
     @classmethod
-    def create_instance(cls, crawler):
+    def create_instance(cls, crawler: Any) -> 'LogIntervalExtension':
         o = cls(crawler)
         crawler.subscriber.subscribe(o.spider_opened, event=spider_opened)
         crawler.subscriber.subscribe(o.spider_closed, event=spider_closed)
         return o
 
-    async def spider_opened(self):
+    async def spider_opened(self) -> None:
         self.task = asyncio.create_task(self.interval_log())
-        await self.task
 
-    async def spider_closed(self):
+    async def spider_closed(self) -> None:
         if self.task:
             self.task.cancel()
+            try:
+                await self.task
+            except asyncio.CancelledError:
+                pass
+            self.task = None
 
-    async def interval_log(self):
+    async def interval_log(self) -> None:
         while True:
-            ... (10 removed lines not shown in this view)
+            try:
+                last_item_count = self.stats.get_value('item_successful_count', default=0)
+                last_response_count = self.stats.get_value('response_received_count', default=0)
+                item_rate = last_item_count - self.item_count
+                response_rate = last_response_count - self.response_count
+                self.item_count, self.response_count = last_item_count, last_response_count
+                self.logger.info(
+                    f'Crawled {last_response_count} pages (at {response_rate} pages/{self.interval}{self.unit}),'
+                    f' Got {last_item_count} items (at {item_rate} items/{self.interval}{self.unit}).'
+                )
+                await asyncio.sleep(self.seconds)
+            except Exception as e:
+                self.logger.error(f"Error in interval logging: {e}")
+                await asyncio.sleep(self.seconds)  # 即使出错也继续执行
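The interval/unit handling in the constructor is a little terse: INTERVAL values that are whole minutes are reported per minute (with a lone "1" dropped), everything else per second. A small illustration of that mapping, with the logic lifted directly from the constructor above and wrapped in a helper for clarity.

# How the INTERVAL setting maps to the rate suffix in the log line
# (logic copied from the constructor above; the helper name is illustrative).
def rate_suffix(seconds: int) -> str:
    interval = int(seconds / 60) if seconds % 60 == 0 else seconds
    interval = "" if interval == 1 else interval
    unit = 'min' if seconds % 60 == 0 else 's'
    return f"{interval}{unit}"

assert rate_suffix(60) == "min"      # e.g. "pages/min"
assert rate_suffix(120) == "2min"    # e.g. "pages/2min"
assert rate_suffix(45) == "45s"      # e.g. "pages/45s"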
crawlo/extension/log_stats.py
CHANGED
@@ -1,44 +1,82 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
+from typing import Any
+
 from crawlo import event
 from crawlo.utils.date_tools import now, time_diff
 
 
 class LogStats(object):
 
-    def __init__(self, stats):
+    def __init__(self, stats: Any):
         self._stats = stats
 
     @classmethod
-    def create_instance(cls, crawler):
+    def create_instance(cls, crawler: Any) -> 'LogStats':
         o = cls(crawler.stats)
-        ... (6 removed lines not shown in this view)
+        # 订阅所有需要的事件
+        event_subscriptions = [
+            (o.spider_opened, event.spider_opened),
+            (o.spider_closed, event.spider_closed),
+            (o.item_successful, event.item_successful),
+            (o.item_discard, event.item_discard),
+            (o.response_received, event.response_received),
+            (o.request_scheduled, event.request_scheduled),
+        ]
+
+        for handler, evt in event_subscriptions:
+            try:
+                crawler.subscriber.subscribe(handler, event=evt)
+            except Exception as e:
+                # 获取日志记录器并记录错误
+                from crawlo.utils.log import get_logger
+                logger = get_logger(cls.__name__)
+                logger.error(f"Failed to subscribe to event {evt}: {e}")
 
         return o
 
-    async def spider_opened(self):
-        ...
+    async def spider_opened(self) -> None:
+        try:
+            self._stats['start_time'] = now(fmt='%Y-%m-%d %H:%M:%S')
+        except Exception as e:
+            # 静默处理,避免影响爬虫运行
+            pass
 
-    async def spider_closed(self):
-        ...
+    async def spider_closed(self) -> None:
+        try:
+            self._stats['end_time'] = now(fmt='%Y-%m-%d %H:%M:%S')
+            self._stats['cost_time(s)'] = time_diff(start=self._stats['start_time'], end=self._stats['end_time'])
+        except Exception as e:
+            # 静默处理,避免影响爬虫运行
+            pass
 
-    async def item_successful(self, _item, _spider):
-        ...
+    async def item_successful(self, _item: Any, _spider: Any) -> None:
+        try:
+            self._stats.inc_value('item_successful_count')
+        except Exception as e:
+            # 静默处理,避免影响爬虫运行
+            pass
 
-    async def item_discard(self, _item, exc, _spider):
-        ... (4 removed lines not shown in this view)
+    async def item_discard(self, _item: Any, exc: Any, _spider: Any) -> None:
+        try:
+            self._stats.inc_value('item_discard_count')
+            reason = getattr(exc, 'msg', None)  # 更安全地获取属性
+            if reason:
+                self._stats.inc_value(f"item_discard/{reason}")
+        except Exception as e:
+            # 静默处理,避免影响爬虫运行
+            pass
 
-    async def response_received(self, _response, _spider):
-        ...
+    async def response_received(self, _response: Any, _spider: Any) -> None:
+        try:
+            self._stats.inc_value('response_received_count')
+        except Exception as e:
+            # 静默处理,避免影响爬虫运行
+            pass
 
-    async def request_scheduled(self, _request, _spider):
-        ...
+    async def request_scheduled(self, _request: Any, _spider: Any) -> None:
+        try:
+            self._stats.inc_value('request_scheduler_count')
+        except Exception as e:
+            # 静默处理,避免影响爬虫运行
+            pass
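The counters written here (item_successful_count, response_received_count, and so on) are the same keys LogIntervalExtension reads back with get_value. A toy stand-in for the stats collector, only to show that inc_value/get_value round trip; the crawler's real stats object is not part of this diff and may behave differently.

# Toy stand-in for the stats collector used above, to show how counters
# written by LogStats are later read back (e.g. by LogIntervalExtension).
class ToyStats(dict):
    def inc_value(self, key: str, count: int = 1) -> None:
        self[key] = self.get(key, 0) + count

    def get_value(self, key: str, default=None):
        return self.get(key, default)

stats = ToyStats()
stats.inc_value('response_received_count')
stats.inc_value('item_successful_count')
print(stats.get_value('response_received_count', default=0))  # -> 1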
crawlo/extension/logging_extension.py
CHANGED
@@ -1,3 +1,4 @@
+from typing import Any
 from crawlo.exceptions import NotConfigured
 from crawlo.utils.log import get_logger
 from crawlo.utils.log import LoggerManager
@@ -9,27 +10,35 @@ class CustomLoggerExtension:
     遵循与 ExtensionManager 一致的接口规范:使用 create_instance
     """
 
-    def __init__(self, settings):
+    def __init__(self, settings: Any):
         self.settings = settings
         # 初始化全局日志配置
         LoggerManager.configure(settings)
 
     @classmethod
-    def create_instance(cls, crawler, *args, **kwargs):
+    def create_instance(cls, crawler: Any, *args: Any, **kwargs: Any) -> 'CustomLoggerExtension':
         """
         工厂方法:兼容 ExtensionManager 的创建方式
         被 ExtensionManager 调用
         """
         # 可以通过 settings 控制是否启用
-        ...
+        log_file = crawler.settings.get('LOG_FILE')
+        log_enable_custom = crawler.settings.get('LOG_ENABLE_CUSTOM', False)
+
+        # 只有当没有配置日志文件且未启用自定义日志时才禁用
+        if not log_file and not log_enable_custom:
             raise NotConfigured("CustomLoggerExtension: LOG_FILE not set and LOG_ENABLE_CUSTOM=False")
 
         return cls(crawler.settings)
 
-    def spider_opened(self, spider):
+    def spider_opened(self, spider: Any) -> None:
         logger = get_logger(__name__)
-        ... (5 removed lines not shown in this view)
+        try:
+            logger.info(
+                f"CustomLoggerExtension: Logging initialized. "
+                f"LOG_FILE={self.settings.get('LOG_FILE')}, "
+                f"LOG_LEVEL={self.settings.get('LOG_LEVEL')}"
+            )
+        except Exception as e:
+            # 即使日志初始化信息无法打印,也不应该影响程序运行
+            pass
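The new create_instance gate disables the extension only when neither LOG_FILE nor LOG_ENABLE_CUSTOM is set. Reduced to a pure function for illustration; the function name is made up, but the condition and setting names mirror the hunk above.

from typing import Optional

# The gate in create_instance above, reduced to a pure function:
# the extension is skipped (NotConfigured) only when neither option is set.
def logger_extension_enabled(log_file: Optional[str], log_enable_custom: bool) -> bool:
    return bool(log_file) or log_enable_custom

assert logger_extension_enabled("crawlo.log", False) is True
assert logger_extension_enabled(None, True) is True
assert logger_extension_enabled(None, False) is False   # -> NotConfigured would be raised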
crawlo/extension/memory_monitor.py
ADDED
@@ -0,0 +1,89 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+import asyncio
+import psutil
+from typing import Any, Optional
+
+from crawlo.utils.log import get_logger
+from crawlo.event import spider_opened, spider_closed
+
+
+class MemoryMonitorExtension:
+    """
+    内存监控扩展
+    定期监控爬虫进程的内存使用情况,并在超出阈值时发出警告
+    """
+
+    def __init__(self, crawler: Any):
+        self.task: Optional[asyncio.Task] = None
+        self.process = psutil.Process()
+        self.settings = crawler.settings
+        self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+
+        # 获取配置参数
+        self.interval = self.settings.get_int('MEMORY_MONITOR_INTERVAL', 60)  # 默认60秒检查一次
+        self.warning_threshold = self.settings.get_float('MEMORY_WARNING_THRESHOLD', 80.0)  # 默认80%警告阈值
+        self.critical_threshold = self.settings.get_float('MEMORY_CRITICAL_THRESHOLD', 90.0)  # 默认90%严重阈值
+
+    @classmethod
+    def create_instance(cls, crawler: Any) -> 'MemoryMonitorExtension':
+        # 只有当配置启用时才创建实例
+        if not crawler.settings.get_bool('MEMORY_MONITOR_ENABLED', False):
+            from crawlo.exceptions import NotConfigured
+            raise NotConfigured("MemoryMonitorExtension: MEMORY_MONITOR_ENABLED is False")
+
+        o = cls(crawler)
+        crawler.subscriber.subscribe(o.spider_opened, event=spider_opened)
+        crawler.subscriber.subscribe(o.spider_closed, event=spider_closed)
+        return o
+
+    async def spider_opened(self) -> None:
+        """爬虫启动时开始监控"""
+        self.task = asyncio.create_task(self._monitor_loop())
+        self.logger.info(
+            f"Memory monitor started. Interval: {self.interval}s, "
+            f"Warning threshold: {self.warning_threshold}%, Critical threshold: {self.critical_threshold}%"
+        )
+
+    async def spider_closed(self) -> None:
+        """爬虫关闭时停止监控"""
+        if self.task:
+            self.task.cancel()
+            try:
+                await self.task
+            except asyncio.CancelledError:
+                pass
+            self.task = None
+        self.logger.info("Memory monitor stopped.")
+
+    async def _monitor_loop(self) -> None:
+        """内存监控循环"""
+        while True:
+            try:
+                # 获取内存使用信息
+                memory_info = self.process.memory_info()
+                memory_percent = self.process.memory_percent()
+
+                # 记录内存使用情况
+                self.logger.debug(
+                    f"Memory usage: {memory_percent:.2f}% "
+                    f"(RSS: {memory_info.rss / 1024 / 1024:.2f} MB, "
+                    f"VMS: {memory_info.vms / 1024 / 1024:.2f} MB)"
+                )
+
+                # 检查是否超过阈值
+                if memory_percent >= self.critical_threshold:
+                    self.logger.critical(
+                        f"Memory usage critical: {memory_percent:.2f}% "
+                        f"(RSS: {memory_info.rss / 1024 / 1024:.2f} MB)"
+                    )
+                elif memory_percent >= self.warning_threshold:
+                    self.logger.warning(
+                        f"Memory usage high: {memory_percent:.2f}% "
+                        f"(RSS: {memory_info.rss / 1024 / 1024:.2f} MB)"
+                    )
+
+                await asyncio.sleep(self.interval)
+            except Exception as e:
+                self.logger.error(f"Error in memory monitoring: {e}")
+                await asyncio.sleep(self.interval)
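The monitor relies on psutil's standard Process API (memory_info and memory_percent), and it only loads when MEMORY_MONITOR_ENABLED is set to True, since that setting defaults to False. A standalone sketch of the same measurement, runnable outside the extension.

import psutil

# Standalone version of the measurement the monitor loop performs above:
# resident set size, virtual size, and the process's share of system memory.
proc = psutil.Process()
mem = proc.memory_info()
print(f"RSS: {mem.rss / 1024 / 1024:.2f} MB, "
      f"VMS: {mem.vms / 1024 / 1024:.2f} MB, "
      f"used: {proc.memory_percent():.2f}% of system memory")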