crawlo 1.3.9-py3-none-any.whl → 1.4.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- crawlo/__init__.py +9 -4
- crawlo/__version__.py +1 -1
- crawlo/commands/run.py +1 -1
- crawlo/core/__init__.py +8 -2
- crawlo/core/processor.py +11 -3
- crawlo/core/scheduler.py +2 -2
- crawlo/crawler.py +12 -0
- crawlo/extension/__init__.py +25 -0
- crawlo/extension/log_interval.py +44 -7
- crawlo/extension/log_stats.py +26 -37
- crawlo/initialization/__init__.py +6 -2
- crawlo/middleware/middleware_manager.py +1 -1
- crawlo/middleware/response_code.py +1 -14
- crawlo/mode_manager.py +13 -7
- crawlo/pipelines/bloom_dedup_pipeline.py +5 -15
- crawlo/pipelines/database_dedup_pipeline.py +5 -8
- crawlo/pipelines/memory_dedup_pipeline.py +5 -15
- crawlo/pipelines/pipeline_manager.py +15 -7
- crawlo/pipelines/redis_dedup_pipeline.py +7 -17
- crawlo/project.py +18 -7
- crawlo/settings/default_settings.py +114 -150
- crawlo/settings/setting_manager.py +14 -9
- crawlo/tools/distributed_coordinator.py +4 -8
- crawlo/utils/fingerprint.py +123 -0
- {crawlo-1.3.9.dist-info → crawlo-1.4.1.dist-info}/METADATA +1 -1
- {crawlo-1.3.9.dist-info → crawlo-1.4.1.dist-info}/RECORD +51 -35
- examples/test_project/__init__.py +7 -0
- examples/test_project/run.py +35 -0
- examples/test_project/test_project/__init__.py +4 -0
- examples/test_project/test_project/items.py +18 -0
- examples/test_project/test_project/middlewares.py +119 -0
- examples/test_project/test_project/pipelines.py +97 -0
- examples/test_project/test_project/settings.py +170 -0
- examples/test_project/test_project/spiders/__init__.py +10 -0
- examples/test_project/test_project/spiders/of_week_dis.py +144 -0
- tests/debug_framework_logger.py +1 -1
- tests/debug_log_levels.py +1 -1
- tests/test_all_pipeline_fingerprints.py +134 -0
- tests/test_default_header_middleware.py +242 -87
- tests/test_fingerprint_consistency.py +136 -0
- tests/test_fingerprint_simple.py +52 -0
- tests/test_framework_logger.py +1 -1
- tests/test_framework_startup.py +1 -1
- tests/test_hash_performance.py +100 -0
- tests/test_mode_change.py +1 -1
- tests/test_offsite_middleware.py +185 -162
- tests/test_offsite_middleware_simple.py +204 -0
- tests/test_pipeline_fingerprint_consistency.py +87 -0
- {crawlo-1.3.9.dist-info → crawlo-1.4.1.dist-info}/WHEEL +0 -0
- {crawlo-1.3.9.dist-info → crawlo-1.4.1.dist-info}/entry_points.txt +0 -0
- {crawlo-1.3.9.dist-info → crawlo-1.4.1.dist-info}/top_level.txt +0 -0
examples/test_project/test_project/settings.py
ADDED

@@ -0,0 +1,170 @@
+# -*- coding: UTF-8 -*-
+"""
+test_project project settings (distributed edition)
+=============================
+Distributed crawler project configuration based on the Crawlo framework.
+Suited to large-scale data collection and multi-node deployment.
+"""
+
+import os
+
+# ============================== Project basics ==============================
+PROJECT_NAME = 'test_project'
+
+# ============================== Run mode ==============================
+RUN_MODE = 'distributed'
+
+# ============================== Concurrency ==============================
+CONCURRENCY = 16
+MAX_RUNNING_SPIDERS = 5
+DOWNLOAD_DELAY = 1.0
+
+# ============================== Downloader ==============================
+# Available downloaders:
+# DOWNLOADER = 'crawlo.downloader.aiohttp_downloader.AioHttpDownloader'
+# DOWNLOADER = 'crawlo.downloader.httpx_downloader.HttpXDownloader'
+# DOWNLOADER = 'crawlo.downloader.cffi_downloader.CurlCffiDownloader'
+DOWNLOADER = 'crawlo.downloader.aiohttp_downloader.AioHttpDownloader'
+
+# ============================== Queue ==============================
+QUEUE_TYPE = 'redis'
+# When using the Redis queue, the queue name can be customized.
+# Queue names follow the unified naming convention: crawlo:{PROJECT_NAME}:queue:requests
+# SCHEDULER_QUEUE_NAME = f'crawlo:{PROJECT_NAME}:queue:requests'
+
+# ============================== Dedup filter ==============================
+FILTER_CLASS = 'crawlo.filters.aioredis_filter.AioRedisFilter'
+
+# ============================== Default dedup pipeline ==============================
+DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.redis_dedup_pipeline.RedisDedupPipeline'
+
+# ============================== Spider modules ==============================
+SPIDER_MODULES = ['test_project.spiders']
+
+# ============================== Middlewares ==============================
+# MIDDLEWARES = [
+#     'crawlo.middleware.simple_proxy.SimpleProxyMiddleware',
+# ]
+
+# ============================== Default request headers ==============================
+# Default headers for DefaultHeaderMiddleware
+DEFAULT_REQUEST_HEADERS = {
+    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+    'Accept-Encoding': 'gzip, deflate, br',
+}
+
+# ============================== Allowed domains ==============================
+# Allowed domains for OffsiteMiddleware
+# ALLOWED_DOMAINS = ['example.com']
+
+# ============================== Item pipelines ==============================
+# PIPELINES = [
+#     'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage (via the asyncmy async driver)
+# ]
+
+# ============================== Extensions ==============================
+# EXTENSIONS = [
+#     'crawlo.extension.log_interval.LogIntervalExtension',
+#     'crawlo.extension.log_stats.LogStats',
+#     'crawlo.extension.logging_extension.CustomLoggerExtension',
+# ]
+
+# ============================== Logging ==============================
+LOG_LEVEL = 'INFO'
+LOG_FILE = 'logs/test_project.log'
+LOG_ENCODING = 'utf-8'  # Explicitly set the log-file encoding
+STATS_DUMP = True
+
+# ============================== Output ==============================
+OUTPUT_DIR = 'output'
+
+# ============================== Redis ==============================
+REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
+REDIS_PORT = int(os.getenv('REDIS_PORT', 6379))
+REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '')
+REDIS_DB = int(os.getenv('REDIS_DB', 0))
+
+# Build the URL depending on whether a password is set
+if REDIS_PASSWORD:
+    REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
+else:
+    REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
+
+# ============================== MySQL ==============================
+MYSQL_HOST = os.getenv('MYSQL_HOST', '127.0.0.1')
+MYSQL_PORT = int(os.getenv('MYSQL_PORT', 3306))
+MYSQL_USER = os.getenv('MYSQL_USER', 'root')
+MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', '123456')
+MYSQL_DB = os.getenv('MYSQL_DB', 'test_project')
+MYSQL_TABLE = 'test_project_data'
+MYSQL_BATCH_SIZE = 100
+MYSQL_USE_BATCH = True  # Enable batch inserts
+
+# ============================== MongoDB ==============================
+MONGO_URI = os.getenv('MONGO_URI', 'mongodb://localhost:27017')
+MONGO_DATABASE = 'test_project_db'
+MONGO_COLLECTION = 'test_project_items'
+MONGO_MAX_POOL_SIZE = 200
+MONGO_MIN_POOL_SIZE = 20
+MONGO_BATCH_SIZE = 100  # Batch insert size
+MONGO_USE_BATCH = True  # Enable batch inserts
+
+# ============================== Proxy ==============================
+# Proxying is disabled by default; enable it and set the related options in the project settings if needed.
+PROXY_ENABLED = False  # Enable proxying
+
+# Simple proxy configuration (for SimpleProxyMiddleware)
+PROXY_LIST = []  # Proxy list, e.g. ["http://proxy1:8080", "http://proxy2:8080"]
+
+# Advanced proxy configuration (for ProxyMiddleware)
+PROXY_API_URL = ""  # Proxy-fetch API endpoint (replace with a real address)
+
+# Proxy extraction (a field path or a function)
+# Example: "proxy" for {"proxy": "http://1.1.1.1:8080"}
+# Example: "data.proxy" for {"data": {"proxy": "http://1.1.1.1:8080"}}
+PROXY_EXTRACTOR = "proxy"
+
+# Proxy refresh control
+PROXY_REFRESH_INTERVAL = 60  # Proxy refresh interval (seconds)
+PROXY_API_TIMEOUT = 10  # Proxy API request timeout
+
+# ============================== Curl-Cffi-specific settings ==============================
+# Browser fingerprint impersonation (CurlCffi downloader only)
+CURL_BROWSER_TYPE = "chrome"  # Options: chrome, edge, safari, firefox, or a version such as chrome136
+
+# Custom browser version map (overrides the default behavior)
+CURL_BROWSER_VERSION_MAP = {
+    "chrome": "chrome136",
+    "edge": "edge101",
+    "safari": "safari184",
+    "firefox": "firefox135",
+}
+
+# ============================== Downloader tuning ==============================
+# Downloader health checks
+DOWNLOADER_HEALTH_CHECK = True  # Enable downloader health checks
+HEALTH_CHECK_INTERVAL = 60  # Health check interval (seconds)
+
+# Request statistics
+REQUEST_STATS_ENABLED = True  # Enable request statistics
+STATS_RESET_ON_START = False  # Reset statistics on startup
+
+# HttpX downloader settings
+HTTPX_HTTP2 = True  # Enable HTTP/2 support
+HTTPX_FOLLOW_REDIRECTS = True  # Follow redirects automatically
+
+# AioHttp downloader settings
+AIOHTTP_AUTO_DECOMPRESS = True  # Decompress responses automatically
+AIOHTTP_FORCE_CLOSE = False  # Force-close connections
+
+# General tuning
+CONNECTION_TTL_DNS_CACHE = 300  # DNS cache TTL (seconds)
+CONNECTION_KEEPALIVE_TIMEOUT = 15  # Keep-Alive timeout (seconds)
+
+# ============================== Memory monitoring ==============================
+# The memory monitor extension is disabled by default; enable it in the project settings if needed.
+MEMORY_MONITOR_ENABLED = False  # Enable memory monitoring
+MEMORY_MONITOR_INTERVAL = 60  # Memory check interval (seconds)
+MEMORY_WARNING_THRESHOLD = 80.0  # Memory usage warning threshold (percent)
+MEMORY_CRITICAL_THRESHOLD = 90.0  # Memory usage critical threshold (percent)
examples/test_project/test_project/spiders/of_week_dis.py
ADDED

@@ -0,0 +1,144 @@
+# -*- coding: UTF-8 -*-
+"""
+test_project.spiders.of_week_dis
+=======================================
+Spider generated by the `crawlo genspider` command.
+Built on the Crawlo framework, with support for async concurrency, distributed crawling, and more.
+
+Usage:
+    crawlo crawl of_week_dis
+"""
+
+from crawlo.spider import Spider
+from crawlo import Request
+from ..items import ExampleItem
+
+
+class OfweekdisSpider(Spider):
+    """
+    Spider: of_week_dis
+
+    Features:
+    - Concurrent crawling
+    - Automatic dedup filtering
+    - Error retry
+    - Item pipeline processing
+    """
+    name = 'of_week_dis'
+    allowed_domains = ['ee.ofweek.com']
+    start_urls = ['https://ee.ofweek.com/']
+
+    # Advanced settings (optional)
+    # custom_settings = {
+    #     'DOWNLOAD_DELAY': 2.0,
+    #     'CONCURRENCY': 4,
+    #     'RETRY_HTTP_CODES': [500, 502, 503, 504, 408, 429],
+    #     'ALLOWED_RESPONSE_CODES': [200, 301, 302],  # Allow only specific status codes
+    #     'DENIED_RESPONSE_CODES': [403, 404],  # Reject specific status codes
+    # }
+
+    def start_requests(self):
+        """
+        Generate the initial requests.
+
+        Supports custom headers, proxies, priorities, and more.
+        """
+        headers = {
+            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
+        }
+
+        for url in self.start_urls:
+            yield Request(
+                url=url,
+                callback=self.parse,
+                headers=headers,
+                # meta={'proxy': 'http://proxy.example.com:8080'},  # Custom proxy
+                # priority=10,  # Request priority (larger number = higher priority)
+            )
+
+    def parse(self, response):
+        """
+        Main response-parsing method.
+
+        Args:
+            response: the response object, containing the page content and metadata
+
+        Yields:
+            Request: new request objects (for deeper crawling)
+            Item: item objects (for data storage)
+        """
+        self.logger.info(f'Parsing page: {response.url}')
+
+        # ================== Data extraction example ==================
+
+        # Extract data into an Item
+        # item = Item()
+        # item['title'] = response.xpath('//title/text()').get(default='')
+        # item['url'] = response.url
+        # item['content'] = response.xpath('//div[@class="content"]//text()').getall()
+        # yield item
+
+        # Yield a plain dict (for simple data)
+        yield {
+            'title': response.xpath('//title/text()').get(default=''),
+            'url': response.url,
+            'status_code': response.status_code,
+            # 'description': response.xpath('//meta[@name="description"]/@content').get(),
+            # 'keywords': response.xpath('//meta[@name="keywords"]/@content').get(),
+        }

+        # ================== Link extraction example ==================
+
+        # Extract and follow links
+        # links = response.xpath('//a/@href').getall()
+        # for link in links:
+        #     # Keep only navigable links
+        #     if link and not link.startswith(('javascript:', 'mailto:', '#')):
+        #         yield response.follow(
+        #             link,
+        #             callback=self.parse_detail,  # or self.parse to keep recursing
+        #             meta={'parent_url': response.url}  # pass parent-page info along
+        #         )
+
+        # Extract links with CSS selectors
+        # for link in response.css('a.item-link::attr(href)').getall():
+        #     yield response.follow(link, callback=self.parse_detail)
+
+        # ================== Pagination example ==================
+
+        # Follow a "next" link
+        # next_page = response.xpath('//a[@class="next"]/@href').get()
+        # if next_page:
+        #     yield response.follow(next_page, callback=self.parse)
+
+        # Numeric pagination
+        # current_page = int(response.meta.get('page', 1))
+        # max_pages = 100  # Cap the page count
+        # if current_page < max_pages:
+        #     next_url = f'https://ee.ofweek.com/page/{current_page + 1}'
+        #     yield Request(
+        #         url=next_url,
+        #         callback=self.parse,
+        #         meta={'page': current_page + 1}
+        #     )
+
+    def parse_detail(self, response):
+        """
+        Parse a detail page (optional).
+
+        Handles detail pages reached from a listing page.
+        """
+        self.logger.info(f'Parsing detail page: {response.url}')
+
+        # parent_url = response.meta.get('parent_url', '')
+        #
+        # yield {
+        #     'title': response.xpath('//h1/text()').get(default=''),
+        #     'content': '\n'.join(response.xpath('//div[@class="content"]//text()').getall()),
+        #     'url': response.url,
+        #     'parent_url': parent_url,
+        #     'publish_time': response.xpath('//time/@datetime').get(),
+        # }
+
+        pass
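Editor's note: the commented link-extraction block above filters out non-navigable hrefs before following them. A minimal, self-contained sketch of that predicate (the sample URLs are illustrative, not taken from the crawl target):

# Hypothetical illustration of the link filter in the spider's commented example.
links = ["https://ee.ofweek.com/news/1.html", "javascript:void(0)", "mailto:a@b.com", "#top"]
valid = [link for link in links if link and not link.startswith(('javascript:', 'mailto:', '#'))]
assert valid == ["https://ee.ofweek.com/news/1.html"]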
tests/debug_framework_logger.py
CHANGED

@@ -7,7 +7,7 @@ import sys
 import os
 sys.path.insert(0, '/')
 
-from crawlo.
+from crawlo.initialization import initialize_framework, get_framework_initializer
 from crawlo.utils.log import get_logger, LoggerManager
 import logging
 
tests/debug_log_levels.py
CHANGED

tests/test_all_pipeline_fingerprints.py
ADDED

@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+"""
+Fingerprint consistency test for all dedup pipelines
+====================
+Verifies that every dedup pipeline generates the same fingerprint for identical data.
+"""
+
+import sys
+import os
+
+# Add the project root to the Python path
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+from crawlo.utils.fingerprint import FingerprintGenerator
+
+
+class MockItem:
+    """Mock item class"""
+
+    def __init__(self, **kwargs):
+        for key, value in kwargs.items():
+            setattr(self, key, value)
+
+    def to_dict(self):
+        """Convert to a dict"""
+        return {k: v for k, v in self.__dict__.items() if not k.startswith('_')}
+
+
+def test_all_pipeline_fingerprints():
+    """Test fingerprint consistency across all pipelines"""
+    # Create a test item
+    test_item = MockItem(
+        title="Test Title",
+        url="https://example.com",
+        content="Test content",
+        price=99.99
+    )
+
+    # Generate the reference fingerprint with the fingerprint generator
+    expected_fingerprint = FingerprintGenerator.item_fingerprint(test_item)
+
+    print(f"Expected fingerprint: {expected_fingerprint}")
+
+    # Test the memory dedup pipeline's fingerprint generation
+    try:
+        from crawlo.pipelines.memory_dedup_pipeline import MemoryDedupPipeline
+        memory_pipeline = MemoryDedupPipeline()
+        memory_fingerprint = memory_pipeline._generate_item_fingerprint(test_item)
+        print(f"Memory pipeline fingerprint: {memory_fingerprint}")
+        assert memory_fingerprint == expected_fingerprint, "Memory pipeline fingerprint mismatch"
+        print("✓ Memory pipeline fingerprint matches")
+    except Exception as e:
+        print(f"✗ Memory pipeline test failed: {e}")
+
+    # Test the Redis dedup pipeline's fingerprint generation
+    try:
+        from crawlo.pipelines.redis_dedup_pipeline import RedisDedupPipeline
+        redis_pipeline = RedisDedupPipeline()
+        redis_fingerprint = redis_pipeline._generate_item_fingerprint(test_item)
+        print(f"Redis pipeline fingerprint: {redis_fingerprint}")
+        assert redis_fingerprint == expected_fingerprint, "Redis pipeline fingerprint mismatch"
+        print("✓ Redis pipeline fingerprint matches")
+    except Exception as e:
+        print(f"✗ Redis pipeline test failed: {e}")
+
+    # Test the Bloom dedup pipeline's fingerprint generation
+    try:
+        from crawlo.pipelines.bloom_dedup_pipeline import BloomDedupPipeline
+        bloom_pipeline = BloomDedupPipeline()
+        bloom_fingerprint = bloom_pipeline._generate_item_fingerprint(test_item)
+        print(f"Bloom pipeline fingerprint: {bloom_fingerprint}")
+        assert bloom_fingerprint == expected_fingerprint, "Bloom pipeline fingerprint mismatch"
+        print("✓ Bloom pipeline fingerprint matches")
+    except Exception as e:
+        print(f"✗ Bloom pipeline test failed: {e}")
+
+    # Test the database dedup pipeline's fingerprint generation
+    try:
+        from crawlo.pipelines.database_dedup_pipeline import DatabaseDedupPipeline
+        database_pipeline = DatabaseDedupPipeline()
+        database_fingerprint = database_pipeline._generate_item_fingerprint(test_item)
+        print(f"Database pipeline fingerprint: {database_fingerprint}")
+        assert database_fingerprint == expected_fingerprint, "Database pipeline fingerprint mismatch"
+        print("✓ Database pipeline fingerprint matches")
+    except Exception as e:
+        print(f"✗ Database pipeline test failed: {e}")
+
+    # Test the distributed coordinator tool's fingerprint generation
+    try:
+        from crawlo.tools.distributed_coordinator import DeduplicationTool
+        dedup_tool = DeduplicationTool()
+        tool_fingerprint = dedup_tool.generate_fingerprint(test_item.to_dict())
+        print(f"Deduplication tool fingerprint: {tool_fingerprint}")
+        # Note: a dict is passed here because the tool's generate_fingerprint works on the raw data
+        expected_tool_fingerprint = FingerprintGenerator.data_fingerprint(test_item.to_dict())
+        assert tool_fingerprint == expected_tool_fingerprint, "Deduplication tool fingerprint mismatch"
+        print("✓ Deduplication tool fingerprint matches")
+    except Exception as e:
+        print(f"✗ Deduplication tool test failed: {e}")
+
+
+def test_fingerprint_stability():
+    """Test fingerprint stability"""
+    # Create identical test items twice
+    item1 = MockItem(
+        title="Test Title",
+        url="https://example.com",
+        content="Test content",
+        price=99.99
+    )
+
+    item2 = MockItem(
+        title="Test Title",
+        url="https://example.com",
+        content="Test content",
+        price=99.99
+    )
+
+    # Generate fingerprints
+    fingerprint1 = FingerprintGenerator.item_fingerprint(item1)
+    fingerprint2 = FingerprintGenerator.item_fingerprint(item2)
+
+    # Verify identical data yields identical fingerprints
+    print(f"\nFirst fingerprint: {fingerprint1}")
+    print(f"Second fingerprint: {fingerprint2}")
+    assert fingerprint1 == fingerprint2, "Same items should generate same fingerprints"
+    print("✓ Identical data yields identical fingerprints")
+
+
+if __name__ == '__main__':
+    test_all_pipeline_fingerprints()
+    test_fingerprint_stability()
+    print("\n🎉 All tests passed!")