crawlo-1.3.9-py3-none-any.whl → crawlo-1.4.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crawlo/__init__.py +9 -4
- crawlo/__version__.py +1 -1
- crawlo/commands/run.py +1 -1
- crawlo/core/__init__.py +8 -2
- crawlo/core/processor.py +11 -3
- crawlo/core/scheduler.py +2 -2
- crawlo/crawler.py +12 -0
- crawlo/extension/__init__.py +25 -0
- crawlo/extension/log_interval.py +44 -7
- crawlo/extension/log_stats.py +26 -37
- crawlo/initialization/__init__.py +6 -2
- crawlo/middleware/middleware_manager.py +1 -1
- crawlo/middleware/response_code.py +1 -14
- crawlo/mode_manager.py +13 -7
- crawlo/pipelines/bloom_dedup_pipeline.py +5 -15
- crawlo/pipelines/database_dedup_pipeline.py +5 -8
- crawlo/pipelines/memory_dedup_pipeline.py +5 -15
- crawlo/pipelines/pipeline_manager.py +15 -7
- crawlo/pipelines/redis_dedup_pipeline.py +7 -17
- crawlo/project.py +18 -7
- crawlo/settings/default_settings.py +114 -150
- crawlo/settings/setting_manager.py +14 -9
- crawlo/tools/distributed_coordinator.py +4 -8
- crawlo/utils/fingerprint.py +123 -0
- {crawlo-1.3.9.dist-info → crawlo-1.4.1.dist-info}/METADATA +1 -1
- {crawlo-1.3.9.dist-info → crawlo-1.4.1.dist-info}/RECORD +51 -35
- examples/test_project/__init__.py +7 -0
- examples/test_project/run.py +35 -0
- examples/test_project/test_project/__init__.py +4 -0
- examples/test_project/test_project/items.py +18 -0
- examples/test_project/test_project/middlewares.py +119 -0
- examples/test_project/test_project/pipelines.py +97 -0
- examples/test_project/test_project/settings.py +170 -0
- examples/test_project/test_project/spiders/__init__.py +10 -0
- examples/test_project/test_project/spiders/of_week_dis.py +144 -0
- tests/debug_framework_logger.py +1 -1
- tests/debug_log_levels.py +1 -1
- tests/test_all_pipeline_fingerprints.py +134 -0
- tests/test_default_header_middleware.py +242 -87
- tests/test_fingerprint_consistency.py +136 -0
- tests/test_fingerprint_simple.py +52 -0
- tests/test_framework_logger.py +1 -1
- tests/test_framework_startup.py +1 -1
- tests/test_hash_performance.py +100 -0
- tests/test_mode_change.py +1 -1
- tests/test_offsite_middleware.py +185 -162
- tests/test_offsite_middleware_simple.py +204 -0
- tests/test_pipeline_fingerprint_consistency.py +87 -0
- {crawlo-1.3.9.dist-info → crawlo-1.4.1.dist-info}/WHEEL +0 -0
- {crawlo-1.3.9.dist-info → crawlo-1.4.1.dist-info}/entry_points.txt +0 -0
- {crawlo-1.3.9.dist-info → crawlo-1.4.1.dist-info}/top_level.txt +0 -0
crawlo/__init__.py
CHANGED
@@ -28,30 +28,35 @@ from crawlo import tools
 
 # 框架核心模块 - 使用TYPE_CHECKING避免循环导入
 if TYPE_CHECKING:
-    from crawlo.
+    from crawlo.initialization import get_framework_initializer, initialize_framework
 
 # 为了向后兼容,从tools中导入cleaners相关的功能
 import crawlo.tools as cleaners
 
+
 # 延迟导入的辅助函数
 def get_framework_initializer():
     """延迟导入get_framework_initializer以避免循环依赖"""
-    from crawlo.
+    from crawlo.initialization import get_framework_initializer as _get_framework_initializer
     return _get_framework_initializer()
 
+
 def initialize_framework(custom_settings=None):
     """延迟导入initialize_framework以避免循环依赖"""
-    from crawlo.
+    from crawlo.initialization import initialize_framework as _initialize_framework
     return _initialize_framework(custom_settings)
 
+
 # 向后兼容的别名
 def get_bootstrap_manager():
     """向后兼容的别名"""
     return get_framework_initializer()
 
+
 # 版本号:优先从元数据读取
 try:
     from importlib.metadata import version
+
     __version__ = version("crawlo")
 except Exception:
     # 开发模式下可能未安装,回退到 __version__.py 或 dev
@@ -85,4 +90,4 @@ __all__ = [
     'get_framework_initializer',
     'get_bootstrap_manager',
     '__version__',
-]
+]
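Reviewer note: the hunk above replaces eager imports with call-time imports so crawlo/__init__.py and crawlo.initialization can reference each other without a circular import. A minimal usage sketch (assumes crawlo 1.4.1 is installed; the settings dict passed below is purely illustrative):

    # Importing crawlo does not load crawlo.initialization yet; the wrappers
    # defer that import until the first call, which breaks the import cycle.
    import crawlo

    initializer = crawlo.get_framework_initializer()
    crawlo.initialize_framework({"LOG_LEVEL": "INFO"})  # illustrative settings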
crawlo/__version__.py
CHANGED
@@ -1 +1 @@
-__version__ = '1.3.9'
+__version__ = '1.4.1'
crawlo/commands/run.py
CHANGED
@@ -23,7 +23,7 @@ from crawlo.crawler import CrawlerProcess
 from crawlo.project import get_settings, _find_project_root
 # 使用新的统一初始化系统
 from crawlo.initialization import initialize_framework
-from crawlo.
+from crawlo.initialization import get_framework_initializer
 from crawlo.utils.log import get_logger
 
 # 延迟获取logger,确保在日志系统配置之后获取
crawlo/core/__init__.py
CHANGED
@@ -10,37 +10,43 @@ from ..initialization import (
     is_framework_ready
 )
 
+
 # 向后兼容的别名
 def async_initialize_framework(*args, **kwargs):
     """Async wrapper for framework initialization"""
     return initialize_framework(*args, **kwargs)
 
+
 def get_framework_initializer():
     """Get framework initializer - compatibility function"""
     from ..initialization.core import CoreInitializer
     return CoreInitializer()
 
+
 def get_framework_logger(name='crawlo.core'):
     """Get framework logger - compatibility function"""
     from ..logging import get_logger
     return get_logger(name)
 
+
 # 向后兼容
 def bootstrap_framework(*args, **kwargs):
     """Bootstrap framework - compatibility function"""
     return initialize_framework(*args, **kwargs)
 
+
 def get_bootstrap_manager():
     """Get bootstrap manager - compatibility function"""
     return get_framework_initializer()
 
+
 __all__ = [
     'initialize_framework',
-    'async_initialize_framework',
+    'async_initialize_framework',
     'get_framework_initializer',
     'is_framework_ready',
     'get_framework_logger',
     # 向后兼容
     'bootstrap_framework',
     'get_bootstrap_manager'
-]
+]
crawlo/core/processor.py
CHANGED
@@ -1,10 +1,12 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
-from asyncio import Queue
+from asyncio import Queue, create_task
 from typing import Union, Optional
 
 from crawlo import Request, Item
 from crawlo.pipelines.pipeline_manager import PipelineManager
+from crawlo.exceptions import ItemDiscard
+from crawlo.event import item_discard
 
 
 class Processor(object):
@@ -27,7 +29,13 @@ class Processor(object):
             await self._process_item(result)
 
     async def _process_item(self, item):
-        await self.pipelines.process_item(item=item)
+        try:
+            await self.pipelines.process_item(item=item)
+        except ItemDiscard as exc:
+            # Item was discarded by a pipeline (e.g., deduplication pipeline)
+            # We simply ignore this item and don't pass it to subsequent pipelines
+            # The statistics system has already been notified in PipelineManager, so we don't need to notify again
+            pass
 
     async def enqueue(self, output: Union[Request, Item]):
         await self.queue.put(output)
@@ -37,4 +45,4 @@ class Processor(object):
         return len(self) == 0
 
     def __len__(self):
-        return self.queue.qsize()
+        return self.queue.qsize()
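Reviewer note: with the new `except ItemDiscard` branch, a pipeline signals "drop this item" by raising the exception instead of returning None, and Processor swallows it because PipelineManager has already notified the event system. A hedged sketch of a custom pipeline following that contract (only the `ItemDiscard` import comes from this diff; the class, field name, and signature are illustrative assumptions):

    # Illustrative custom pipeline using the ItemDiscard contract shown above.
    from crawlo.exceptions import ItemDiscard


    class PriceGuardPipeline:
        """Drop items without a price instead of passing them downstream."""

        def process_item(self, item, spider):
            if not getattr(item, "price", None):    # hypothetical item field
                raise ItemDiscard("missing price")  # PipelineManager notifies item_discard
            return item                             # returning None is rejected upstream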
crawlo/core/scheduler.py
CHANGED
@@ -77,8 +77,8 @@ class Scheduler:
         # 只有在确实需要更新配置时才重新创建过滤器实例
         # 检查是否真的进行了配置更新
         filter_updated = (
-            (self.queue_manager._queue_type == QueueType.REDIS and '
-            (self.queue_manager._queue_type == QueueType.MEMORY and
+            (self.queue_manager._queue_type == QueueType.REDIS and 'aioredis_filter' in self.crawler.settings.get('FILTER_CLASS', '')) or
+            (self.queue_manager._queue_type == QueueType.MEMORY and 'memory_filter' in self.crawler.settings.get('FILTER_CLASS', ''))
         )
 
         if needs_config_update or filter_updated:
crawlo/crawler.py
CHANGED
@@ -308,6 +308,18 @@ class ModernCrawler:
         except Exception as e:
             self._logger.warning(f"Spider cleanup failed: {e}")
 
+        # 调用StatsCollector的close_spider方法,设置reason和spider_name
+        if self._stats and hasattr(self._stats, 'close_spider'):
+            try:
+                # 使用默认的'finished'作为reason
+                self._stats.close_spider(self._spider, reason='finished')
+            except Exception as e:
+                self._logger.warning(f"Stats close_spider failed: {e}")
+
+        # 触发spider_closed事件,通知所有订阅者(包括扩展)
+        # 传递reason参数,这里使用默认的'finished'作为reason
+        await self.subscriber.notify("spider_closed", reason='finished')
+
         if self._stats and hasattr(self._stats, 'close'):
             try:
                 close_result = self._stats.close()
crawlo/extension/__init__.py
CHANGED
@@ -16,6 +16,7 @@ class ExtensionManager(object):
         extensions = self.crawler.settings.get_list('EXTENSIONS')
         self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
         self._add_extensions(extensions)
+        self._subscribe_extensions()
 
     @classmethod
     def create_instance(cls, *args: Any, **kwargs: Any) -> 'ExtensionManager':
@@ -37,3 +38,27 @@ class ExtensionManager(object):
         if extensions:
             # 恢复INFO级别日志,保留关键的启用信息
             self.logger.info(f"Enabled extensions: \n{pformat(extensions)}")
+
+    def _subscribe_extensions(self) -> None:
+        """订阅扩展方法到相应的事件"""
+        for extension in self.extensions:
+            # 订阅 spider_closed 方法
+            if hasattr(extension, 'spider_closed'):
+                self.crawler.subscriber.subscribe(extension.spider_closed, event="spider_closed")
+
+            # 订阅 item_successful 方法
+            if hasattr(extension, 'item_successful'):
+                self.crawler.subscriber.subscribe(extension.item_successful, event="item_successful")
+
+            # 订阅 item_discard 方法
+            if hasattr(extension, 'item_discard'):
+                self.crawler.subscriber.subscribe(extension.item_discard, event="item_discard")
+
+            # 订阅 response_received 方法
+            if hasattr(extension, 'response_received'):
+                # 修复:将事件名称从 "request_received" 更正为 "response_received"
+                self.crawler.subscriber.subscribe(extension.response_received, event="response_received")
+
+            # 订阅 request_scheduled 方法
+            if hasattr(extension, 'request_scheduled'):
+                self.crawler.subscriber.subscribe(extension.request_scheduled, event="request_scheduled")
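Reviewer note: with `_subscribe_extensions` in place, an extension only needs to expose methods named after the events and the manager wires them up. A minimal sketch of such an extension; the hook signatures are inferred from the LogStats and crawler.py hunks in this diff and should be treated as assumptions rather than documented API:

    # Sketch of an extension whose methods are auto-subscribed by
    # _subscribe_extensions() above; signatures are inferred, not official.
    class CountingExtension:
        def __init__(self, crawler):
            self.crawler = crawler
            self.items = 0

        @classmethod
        def create_instance(cls, crawler):
            return cls(crawler)

        async def item_successful(self, item, spider):     # wired to "item_successful"
            self.items += 1

        async def spider_closed(self, reason='finished'):  # wired to "spider_closed"
            print(f"spider closed ({reason}); {self.items} items collected")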
crawlo/extension/log_interval.py
CHANGED
@@ -15,11 +15,23 @@ class LogIntervalExtension(object):
         self.item_count = 0
         self.response_count = 0
         self.seconds = crawler.settings.get('INTERVAL', 60)  # 默认60秒
-
-
-
+
+        # 修复时间单位计算逻辑
+        if self.seconds % 60 == 0:
+            self.interval = int(self.seconds / 60)
+            self.unit = 'min'
+        else:
+            self.interval = self.seconds
+            self.unit = 's'
+
+        # 处理单数情况
+        if self.interval == 1 and self.unit == 'min':
+            self.interval_display = ""
+        else:
+            self.interval_display = str(self.interval)
 
         self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+        self.logger.info(f"LogIntervalExtension initialized with interval: {self.seconds} seconds")
 
     @classmethod
     def create_instance(cls, crawler: Any) -> 'LogIntervalExtension':
@@ -29,9 +41,12 @@ class LogIntervalExtension(object):
         return o
 
     async def spider_opened(self) -> None:
+        self.logger.info("Spider opened, starting interval logging task")
         self.task = asyncio.create_task(self.interval_log())
+        self.logger.info("Interval logging task started")
 
     async def spider_closed(self) -> None:
+        self.logger.info("Spider closed, stopping interval logging task")
         if self.task:
             self.task.cancel()
             try:
@@ -41,17 +56,39 @@ class LogIntervalExtension(object):
                 self.task = None
 
     async def interval_log(self) -> None:
+        iteration = 0
         while True:
             try:
+                iteration += 1
+                self.logger.debug(f"Interval log iteration {iteration} starting")
                 last_item_count = self.stats.get_value('item_successful_count', default=0)
                 last_response_count = self.stats.get_value('response_received_count', default=0)
                 item_rate = last_item_count - self.item_count
                 response_rate = last_response_count - self.response_count
+
+                # 添加调试信息
+                self.logger.debug(f"Debug info - Iteration: {iteration}, Last item count: {last_item_count}, Last response count: {last_response_count}")
+                self.logger.debug(f"Debug info - Previous item count: {self.item_count}, Previous response count: {self.response_count}")
+                self.logger.debug(f"Debug info - Item rate: {item_rate}, Response rate: {response_rate}")
+
                 self.item_count, self.response_count = last_item_count, last_response_count
-
-
-
-
+
+                # 修复效率计算,确保使用正确的单位
+                if self.unit == 'min' and self.seconds > 0:
+                    # 转换为每分钟速率
+                    pages_per_min = response_rate * 60 / self.seconds if self.seconds > 0 else 0
+                    items_per_min = item_rate * 60 / self.seconds if self.seconds > 0 else 0
+                    self.logger.info(
+                        f'Crawled {last_response_count} pages (at {pages_per_min:.0f} pages/min),'
+                        f' Got {last_item_count} items (at {items_per_min:.0f} items/min).'
+                    )
+                else:
+                    # 使用原始单位
+                    self.logger.info(
+                        f'Crawled {last_response_count} pages (at {response_rate} pages/{self.interval_display}{self.unit}),'
+                        f' Got {last_item_count} items (at {item_rate} items/{self.interval_display}{self.unit}).'
+                    )
+                self.logger.debug(f"Interval log iteration {iteration} completed, sleeping for {self.seconds} seconds")
                 await asyncio.sleep(self.seconds)
            except Exception as e:
                self.logger.error(f"Error in interval logging: {e}")
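Reviewer note: the unit/rate logic above reports per-minute rates whenever INTERVAL is a multiple of 60. For example, with INTERVAL=300 a window of 250 responses is reported as 250 * 60 / 300 = 50 pages/min. A small standalone sketch of that arithmetic (the function name is illustrative, not part of the extension):

    # Illustrative sketch of the interval/unit arithmetic introduced above;
    # assumes INTERVAL is given in seconds, as in the hunk.
    def describe_rate(interval_seconds: int, responses_in_window: int) -> str:
        if interval_seconds % 60 == 0:
            unit, interval = 'min', interval_seconds // 60
        else:
            unit, interval = 's', interval_seconds
        if unit == 'min':
            per_min = responses_in_window * 60 / interval_seconds
            return f"{per_min:.0f} pages/min"
        return f"{responses_in_window} pages/{interval}{unit}"

    print(describe_rate(300, 250))  # -> "50 pages/min"
    print(describe_rate(45, 90))    # -> "90 pages/45s"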
crawlo/extension/log_stats.py
CHANGED
@@ -1,52 +1,43 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
+"""
+日志统计扩展
+提供详细的爬虫运行统计信息
+"""
+import asyncio
 from typing import Any
 
-from crawlo import
-from crawlo.
+from crawlo.utils.log import get_logger
+from crawlo.utils import now, time_diff
 
 
-class LogStats(object):
+class LogStats:
+    """
+    日志统计扩展,记录和输出爬虫运行过程中的各种统计信息
+    """
 
-    def __init__(self,
-        self.
+    def __init__(self, crawler):
+        self.crawler = crawler
+        self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+        self._stats = crawler.stats
+        self._stats['start_time'] = now(fmt='%Y-%m-%d %H:%M:%S')
 
     @classmethod
-    def
-
-        # 订阅所有需要的事件
-        event_subscriptions = [
-            (o.spider_opened, event.spider_opened),
-            (o.spider_closed, event.spider_closed),
-            (o.item_successful, event.item_successful),
-            (o.item_discard, event.item_discard),
-            (o.response_received, event.response_received),
-            (o.request_scheduled, event.request_scheduled),
-        ]
-
-        for handler, evt in event_subscriptions:
-            try:
-                crawler.subscriber.subscribe(handler, event=evt)
-            except Exception as e:
-                # 获取日志记录器并记录错误
-                from crawlo.utils.log import get_logger
-                logger = get_logger(cls.__name__)
-                logger.error(f"Failed to subscribe to event {evt}: {e}")
+    def from_crawler(cls, crawler):
+        return cls(crawler)
 
-
-
-
-        try:
-            self._stats['start_time'] = now(fmt='%Y-%m-%d %H:%M:%S')
-        except Exception as e:
-            # 静默处理,避免影响爬虫运行
-            pass
+    @classmethod
+    def create_instance(cls, crawler):
+        return cls.from_crawler(crawler)
 
-    async def spider_closed(self) -> None:
+    async def spider_closed(self, reason: str = 'finished') -> None:
         try:
             self._stats['end_time'] = now(fmt='%Y-%m-%d %H:%M:%S')
             self._stats['cost_time(s)'] = time_diff(start=self._stats['start_time'], end=self._stats['end_time'])
+            self._stats['reason'] = reason
         except Exception as e:
+            # 添加日志以便调试
+            self.logger.error(f"Error in spider_closed: {e}")
             # 静默处理,避免影响爬虫运行
             pass
 
@@ -59,10 +50,8 @@ class LogStats(object):
 
     async def item_discard(self, _item: Any, exc: Any, _spider: Any) -> None:
         try:
+            # 只增加总的丢弃计数,不记录每个丢弃项目的原因详情
             self._stats.inc_value('item_discard_count')
-            reason = getattr(exc, 'msg', None)  # 更安全地获取属性
-            if reason:
-                self._stats.inc_value(f"item_discard/{reason}")
         except Exception as e:
             # 静默处理,避免影响爬虫运行
             pass
crawlo/initialization/__init__.py
CHANGED
@@ -16,25 +16,29 @@ from .context import InitializationContext
 from .core import CoreInitializer
 from .phases import InitializationPhase
 
+
 # 公共接口
 def initialize_framework(settings=None, **kwargs):
     """初始化框架的主要入口"""
     return CoreInitializer().initialize(settings, **kwargs)
 
+
 def is_framework_ready():
     """检查框架是否已准备就绪"""
     return CoreInitializer().is_ready
 
+
 def get_framework_context():
     """获取框架初始化上下文"""
     return CoreInitializer().context
 
+
 __all__ = [
     'InitializerRegistry',
-    'InitializationContext',
+    'InitializationContext',
     'CoreInitializer',
     'InitializationPhase',
     'initialize_framework',
     'is_framework_ready',
     'get_framework_context'
-]
+]
crawlo/middleware/middleware_manager.py
CHANGED
@@ -86,7 +86,7 @@ class MiddlewareManager:
             response = await self._process_exception(request, exp)
         else:
             create_task(self.crawler.subscriber.notify(response_received, response, self.crawler.spider))
-
+            self._stats.inc_value('response_received_count')
             if isinstance(response, Response):
                 response = await self._process_response(request, response)
             if isinstance(response, Request):
crawlo/middleware/response_code.py
CHANGED
@@ -127,9 +127,7 @@ class ResponseCodeMiddleware(object):
         """
         status_code = response.status_code
 
-        #
-        self.stats.inc_value(f'response_status_code/count/{status_code}')
-
+        # 只记录总的统计信息,不记录每个域名和每个状态码的详细信息
         # 记录状态码分类统计
         category = self._get_status_category(status_code)
         self.stats.inc_value(f'response_status_code/category/{category}')
@@ -144,17 +142,6 @@ class ResponseCodeMiddleware(object):
         if hasattr(response, 'content_length') and response.content_length:
             self.stats.inc_value('response_total_bytes', response.content_length)
 
-        # 记录域名统计
-        try:
-            from urllib.parse import urlparse
-            parsed_url = urlparse(response.url)
-            domain = parsed_url.netloc
-            if domain:
-                self.stats.inc_value(f'response_status_code/domain/{domain}/count/{status_code}')
-                self.stats.inc_value(f'response_status_code/domain/{domain}/category/{category}')
-        except Exception:
-            self.stats.inc_value('response_status_code/domain/invalid_url/count/{status_code}')
-
         # 详细日志记录
         self.logger.debug(
             f'收到响应: {status_code} {response.url} '
crawlo/mode_manager.py
CHANGED
@@ -7,7 +7,7 @@
 
 支持的运行模式:
 1. standalone - 单机模式(默认)
-2. distributed - 分布式模式
+2. distributed - 分布式模式
 3. auto - 自动检测模式
 """
 import os
@@ -29,7 +29,7 @@ class ModeManager:
         # 延迟初始化logger,避免循环依赖
         self._logger = None
         self._debug("运行模式管理器初始化完成")
-
+
     def _get_logger(self):
         """延迟获取logger实例"""
         if self._logger is None:
@@ -40,7 +40,7 @@ class ModeManager:
                 # 如果日志系统尚未初始化,返回None
                 pass
         return self._logger
-
+
     def _debug(self, message: str):
         """调试日志"""
         logger = self._get_logger()
@@ -73,7 +73,7 @@ class ModeManager:
             redis_url = f'redis://:{redis_password}@{redis_host}:{redis_port}/{redis_db}'
         else:
             redis_url = f'redis://{redis_host}:{redis_port}/{redis_db}'
-
+
         return {
             'QUEUE_TYPE': 'redis',
             'FILTER_CLASS': 'crawlo.filters.aioredis_filter.AioRedisFilter',
@@ -119,6 +119,7 @@ class ModeManager:
 
         if mode == RunMode.STANDALONE:
             mode_info = "使用单机模式 - 简单快速,适合开发和中小规模爬取"
+            # 对于单机模式,如果用户设置了QUEUE_TYPE为'auto',应该保留用户的设置
            settings = self.get_standalone_settings()
             self._debug("应用单机模式配置")
 
@@ -142,8 +143,13 @@ class ModeManager:
             raise ValueError(f"不支持的运行模式: {mode}")
 
         # 合并用户自定义配置
-        user_settings = {
-
+        user_settings = {
+            k: v for k,
+            v in kwargs.items() if k not in [
+                'redis_host',
+                'redis_port',
+                'redis_password',
+                'project_name']}
         settings.update(user_settings)
         self._debug(f"合并用户自定义配置: {list(user_settings.keys())}")
 
@@ -210,4 +216,4 @@ def auto_mode(**kwargs) -> Dict[str, Any]:
 def from_env(default_mode: str = 'standalone') -> Dict[str, Any]:
     """从环境变量创建配置"""
     # 移除直接使用 os.getenv(),要求通过 settings 配置
-    raise RuntimeError("环境变量配置已移除,请在 settings 中配置相关参数")
+    raise RuntimeError("环境变量配置已移除,请在 settings 中配置相关参数")
crawlo/pipelines/bloom_dedup_pipeline.py
CHANGED
@@ -38,6 +38,7 @@ except ImportError:
 
 from crawlo import Item
 from crawlo.spider import Spider
+from crawlo.utils.fingerprint import FingerprintGenerator
 from crawlo.utils.log import get_logger
 from crawlo.exceptions import DropItem, ItemDiscard
 
@@ -109,6 +110,9 @@ class BloomDedupPipeline:
                 self.logger.debug(f"Processing new item: {fingerprint[:20]}...")
                 return item
 
+        except ItemDiscard:
+            # 重新抛出ItemDiscard异常,确保管道管理器能正确处理
+            raise
         except Exception as e:
             self.logger.error(f"Error processing item: {e}")
             # 在错误时继续处理,避免丢失数据
@@ -123,21 +127,7 @@ class BloomDedupPipeline:
         :param item: 数据项
         :return: 指纹字符串
         """
-
-        try:
-            item_dict = item.to_dict()
-        except AttributeError:
-            # 兼容没有to_dict方法的Item实现
-            item_dict = dict(item)
-
-        # 对字典进行排序以确保一致性
-        sorted_items = sorted(item_dict.items())
-
-        # 生成指纹字符串
-        fingerprint_string = '|'.join([f"{k}={v}" for k, v in sorted_items if v is not None])
-
-        # 使用 SHA256 生成固定长度的指纹
-        return hashlib.sha256(fingerprint_string.encode('utf-8')).hexdigest()
+        return FingerprintGenerator.item_fingerprint(item)
 
     def close_spider(self, spider: Spider) -> None:
         """
crawlo/pipelines/database_dedup_pipeline.py
CHANGED
@@ -17,6 +17,7 @@ import aiomysql
 from crawlo import Item
 from crawlo.exceptions import DropItem, ItemDiscard
 from crawlo.spider import Spider
+from crawlo.utils.fingerprint import FingerprintGenerator
 from crawlo.utils.log import get_logger
 
 
@@ -140,6 +141,9 @@ class DatabaseDedupPipeline:
                 self.logger.debug(f"Processing new item: {fingerprint[:20]}...")
                 return item
 
+        except ItemDiscard:
+            # 重新抛出ItemDiscard异常,确保管道管理器能正确处理
+            raise
         except Exception as e:
             self.logger.error(f"Error processing item: {e}")
             # 在错误时继续处理,避免丢失数据
@@ -190,11 +194,4 @@ class DatabaseDedupPipeline:
         :param item: 数据项
         :return: 指纹字符串
         """
-
-        try:
-            item_dict = item.to_dict()
-        except AttributeError:
-            # 兼容没有to_dict方法的Item实现
-            item_dict = dict(item)
-
-        # 对字典进行排序以确保一致性
+        return FingerprintGenerator.item_fingerprint(item)
crawlo/pipelines/memory_dedup_pipeline.py
CHANGED
@@ -18,6 +18,7 @@ from typing import Set
 from crawlo import Item
 from crawlo.exceptions import DropItem, ItemDiscard
 from crawlo.spider import Spider
+from crawlo.utils.fingerprint import FingerprintGenerator
 from crawlo.utils.log import get_logger
 
 
@@ -71,6 +72,9 @@ class MemoryDedupPipeline:
                 self.logger.debug(f"Processing new item: {fingerprint[:20]}...")
                 return item
 
+        except ItemDiscard:
+            # 重新抛出ItemDiscard异常,确保管道管理器能正确处理
+            raise
         except Exception as e:
             self.logger.error(f"Error processing item: {e}")
             # 在错误时继续处理,避免丢失数据
@@ -85,21 +89,7 @@ class MemoryDedupPipeline:
         :param item: 数据项
         :return: 指纹字符串
         """
-
-        try:
-            item_dict = item.to_dict()
-        except AttributeError:
-            # 兼容没有to_dict方法的Item实现
-            item_dict = dict(item)
-
-        # 对字典进行排序以确保一致性
-        sorted_items = sorted(item_dict.items())
-
-        # 生成指纹字符串
-        fingerprint_string = '|'.join([f"{k}={v}" for k, v in sorted_items if v is not None])
-
-        # 使用 SHA256 生成固定长度的指纹
-        return hashlib.sha256(fingerprint_string.encode('utf-8')).hexdigest()
+        return FingerprintGenerator.item_fingerprint(item)
 
     def close_spider(self, spider: Spider) -> None:
         """
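Reviewer note: all three dedup pipelines now delegate fingerprinting to the new crawlo/utils/fingerprint.py (+123 lines, not shown in this diff), which is what the added fingerprint-consistency tests exercise. A hedged reconstruction of such a helper, based only on the inline logic removed from the pipelines above; the shipped FingerprintGenerator in 1.4.1 may differ in detail:

    # Reconstructed from the removed pipeline code; not the shipped implementation.
    import hashlib


    class FingerprintGenerator:
        @staticmethod
        def item_fingerprint(item) -> str:
            try:
                item_dict = item.to_dict()
            except AttributeError:
                # Items without to_dict() are treated as plain mappings
                item_dict = dict(item)
            # Sort fields and drop None values so equal items hash identically
            sorted_items = sorted(item_dict.items())
            fingerprint_string = '|'.join(f"{k}={v}" for k, v in sorted_items if v is not None)
            return hashlib.sha256(fingerprint_string.encode('utf-8')).hexdigest()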
crawlo/pipelines/pipeline_manager.py
CHANGED
@@ -66,11 +66,19 @@ class PipelineManager:
 
     async def process_item(self, item):
         try:
-            for method in self.methods:
-                item
-
-
-
+            for i, method in enumerate(self.methods):
+                self.logger.debug(f"Processing item with pipeline method {i}: {method.__qualname__}")
+                try:
+                    item = await common_call(method, item, self.crawler.spider)
+                    if item is None:
+                        raise InvalidOutputError(f"{method.__qualname__} return None is not supported.")
+                except (ItemDiscard, DropItem) as exc:  # 同时捕获两种异常类型
+                    self.logger.debug(f"Item discarded by pipeline: {exc}")
+                    create_task(self.crawler.subscriber.notify(item_discard, item, exc, self.crawler.spider))
+                    # 重新抛出异常,确保上层调用者也能捕获到,并停止执行后续管道
+                    raise
+                except (ItemDiscard, DropItem):
+                    # 异常已经被处理和通知,这里只需要重新抛出
+                    raise
         else:
-            create_task(self.crawler.subscriber.notify(item_successful, item, self.crawler.spider))
+            create_task(self.crawler.subscriber.notify(item_successful, item, self.crawler.spider))