crawlo-1.1.2-py3-none-any.whl → crawlo-1.1.3-py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crawlo might be problematic.
- crawlo/__init__.py +34 -34
- crawlo/__version__.py +1 -1
- crawlo/cli.py +40 -40
- crawlo/commands/__init__.py +13 -13
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +285 -285
- crawlo/commands/startproject.py +196 -196
- crawlo/commands/stats.py +188 -188
- crawlo/commands/utils.py +186 -186
- crawlo/config.py +279 -279
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +171 -171
- crawlo/core/enhanced_engine.py +189 -189
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +166 -162
- crawlo/crawler.py +1027 -1027
- crawlo/downloader/__init__.py +242 -242
- crawlo/downloader/aiohttp_downloader.py +212 -212
- crawlo/downloader/cffi_downloader.py +251 -251
- crawlo/downloader/httpx_downloader.py +259 -257
- crawlo/event.py +11 -11
- crawlo/exceptions.py +82 -78
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/extension/logging_extension.py +34 -34
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +242 -242
- crawlo/filters/memory_filter.py +269 -269
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/proxy.py +248 -248
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +125 -125
- crawlo/mode_manager.py +200 -200
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +311 -311
- crawlo/network/response.py +271 -269
- crawlo/pipelines/__init__.py +22 -13
- crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +225 -0
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +116 -0
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_pipeline.py +195 -195
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/pipelines/redis_dedup_pipeline.py +163 -0
- crawlo/project.py +153 -153
- crawlo/queue/pqueue.py +37 -37
- crawlo/queue/queue_manager.py +307 -303
- crawlo/queue/redis_priority_queue.py +208 -191
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +245 -226
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +639 -639
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +106 -106
- crawlo/task_manager.py +30 -30
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +86 -86
- crawlo/templates/project/pipelines.py.tmpl +341 -335
- crawlo/templates/project/run.py.tmpl +251 -238
- crawlo/templates/project/settings.py.tmpl +250 -247
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +177 -177
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/controlled_spider_mixin.py +439 -335
- crawlo/utils/date_tools.py +233 -233
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +343 -343
- crawlo/utils/log.py +128 -128
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +219 -219
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/METADATA +635 -567
- crawlo-1.1.3.dist-info/RECORD +113 -0
- examples/__init__.py +7 -7
- examples/controlled_spider_example.py +205 -0
- tests/__init__.py +7 -7
- tests/test_final_validation.py +153 -153
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_redis_config.py +28 -28
- tests/test_redis_queue.py +224 -224
- tests/test_request_serialization.py +70 -70
- tests/test_scheduler.py +241 -241
- crawlo-1.1.2.dist-info/RECORD +0 -108
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
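The notable additions in this release are the four new deduplication pipeline modules under crawlo/pipelines/ (memory_dedup_pipeline.py, redis_dedup_pipeline.py, bloom_dedup_pipeline.py, database_dedup_pipeline.py), along with the controlled-spider example and the expanded pipelines template shown in the diff below. A minimal sketch of wiring one of them into a project, assuming the PIPELINES list format and the class paths documented in the template's usage notes; 'myproject' is a hypothetical project name:

# settings.py -- sketch only; class paths are taken from the template's
# usage notes, 'myproject' is a placeholder project name
PIPELINES = [
    'myproject.pipelines.ValidationPipeline',    # validate required fields
    # Pick the built-in dedup pipeline that matches your deployment:
    'crawlo.pipelines.MemoryDedupPipeline',      # standalone: in-memory set
    # 'crawlo.pipelines.RedisDedupPipeline',     # distributed: shared Redis state
    # 'crawlo.pipelines.BloomDedupPipeline',     # large-scale: Bloom filter
    # 'crawlo.pipelines.DatabaseDedupPipeline',  # persistent: database-backed
    'myproject.pipelines.DatabasePipeline',      # batched MySQL/MongoDB writes
]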
--- crawlo/templates/project/pipelines.py.tmpl (1.1.2)
+++ crawlo/templates/project/pipelines.py.tmpl (1.1.3)
@@ -1,336 +1,342 @@
 # -*- coding: UTF-8 -*-
 """
 {{project_name}}.pipelines
 ==========================
 Data pipelines that process the Items returned by a Spider.
 For example: cleaning, validation, deduplication, saving to a database.

 Supports asynchronous concurrent processing and multiple storage backends.
 """

 import json
 import asyncio
 from typing import Dict, Any
 from datetime import datetime
 from crawlo.exceptions import DropItem
 from crawlo.utils.log import get_logger


 class ValidationPipeline:
     """
     Data validation pipeline.

     Checks that required fields are present and drops invalid items.
     """

     def __init__(self):
         self.logger = get_logger(self.__class__.__name__)
         # Required fields (adjust to your needs)
         self.required_fields = ['title', 'url']

     def process_item(self, item, spider):
         """Validate an item."""
         # Check required fields
         for field in self.required_fields:
             if not item.get(field):
                 raise DropItem(f"Missing required field: {field}")

         # Clean data
         if 'title' in item:
             item['title'] = str(item['title']).strip()

         # Add a timestamp
         item['crawled_at'] = datetime.now().isoformat()

         self.logger.debug(f"Item passed validation: {item.get('url', 'Unknown URL')}")
         return item


 class PrintItemPipeline:
     """
     Simple printing pipeline, for debugging.
     """

     def __init__(self):
         self.logger = get_logger(self.__class__.__name__)
         self.item_count = 0

     def process_item(self, item, spider):
         """Print an item."""
         self.item_count += 1
         self.logger.info(f"[Item #{self.item_count}] {json.dumps(dict(item), ensure_ascii=False, indent=2)}")
         return item


 class DuplicatesPipeline:
     """
     Deduplication pipeline.

     Deduplicates on a chosen field to keep duplicate data out.
     """

     def __init__(self):
         self.logger = get_logger(self.__class__.__name__)
         self.seen = set()
         # Dedup field ('url', 'id', or any other unique identifier)
         self.duplicate_field = 'url'
         self.drop_count = 0

     def process_item(self, item, spider):
         """Check for and drop duplicate items."""
         identifier = item.get(self.duplicate_field)

         if not identifier:
             self.logger.warning(f"Item is missing dedup field '{self.duplicate_field}', skipping dedup check")
             return item

         if identifier in self.seen:
             self.drop_count += 1
             self.logger.debug(f"Duplicate found: {identifier} ({self.drop_count} filtered so far)")
             raise DropItem(f"Duplicate item: {identifier}")

         self.seen.add(identifier)
         return item

     def close_spider(self, spider):
         """Log statistics when the spider finishes."""
         self.logger.info(f"Dedup pipeline stats: {self.drop_count} duplicates filtered, {len(self.seen)} unique items")


 class JsonFilesPipeline:
     """
     JSON file storage pipeline.

     Saves every item as a separate JSON file.
     """

     def __init__(self, output_dir='output'):
         self.logger = get_logger(self.__class__.__name__)
         self.output_dir = output_dir
         self.file_count = 0

     @classmethod
     def from_crawler(cls, crawler):
         """Create a pipeline instance from crawler settings."""
         output_dir = crawler.settings.get('JSON_OUTPUT_DIR', 'output')
         return cls(output_dir=output_dir)

     def open_spider(self, spider):
         """Create the output directory when the spider starts."""
         import os
         os.makedirs(self.output_dir, exist_ok=True)
         self.logger.info(f"JSON files will be written to: {self.output_dir}")

     def process_item(self, item, spider):
         """Save an item as a JSON file."""
         self.file_count += 1
         filename = f"{spider.name}_{self.file_count:06d}.json"
         filepath = f"{self.output_dir}/{filename}"

         with open(filepath, 'w', encoding='utf-8') as f:
             json.dump(dict(item), f, ensure_ascii=False, indent=2)

         self.logger.debug(f"Saved: {filepath}")
         return item


 class DatabasePipeline:
     """
     Example database storage pipeline.

     Supports MySQL and MongoDB; choose whichever you need.
     """

     def __init__(self, database_type='mysql'):
         self.logger = get_logger(self.__class__.__name__)
         self.database_type = database_type
         self.connection = None
         self.batch_items = []
         self.batch_size = 100  # batch write size

     @classmethod
     def from_crawler(cls, crawler):
         """Create a pipeline instance from crawler settings."""
         db_type = crawler.settings.get('DATABASE_TYPE', 'mysql')
         return cls(database_type=db_type)

     async def open_spider(self, spider):
         """Connect to the database when the spider starts."""
         if self.database_type == 'mysql':
             await self._connect_mysql(spider)
         elif self.database_type == 'mongodb':
             await self._connect_mongodb(spider)
         else:
             raise ValueError(f"Unsupported database type: {self.database_type}")

     async def _connect_mysql(self, spider):
         """Connect to MySQL."""
         try:
             import aiomysql

             settings = spider.crawler.settings
             self.connection = await aiomysql.connect(
                 host=settings.get('MYSQL_HOST', '127.0.0.1'),
                 port=settings.get('MYSQL_PORT', 3306),
                 user=settings.get('MYSQL_USER', 'root'),
                 password=settings.get('MYSQL_PASSWORD', ''),
                 db=settings.get('MYSQL_DB', '{{project_name}}'),
                 charset='utf8mb4',
                 autocommit=True
             )

             # Create the table if it does not exist
             await self._create_mysql_table(spider)
             self.logger.info("Connected to MySQL")

         except ImportError:
             self.logger.error("Missing aiomysql dependency, install it with: pip install aiomysql")
             raise
         except Exception as e:
             self.logger.error(f"MySQL connection failed: {e}")
             raise

     async def _connect_mongodb(self, spider):
         """Connect to MongoDB."""
         try:
             from motor.motor_asyncio import AsyncIOMotorClient

             settings = spider.crawler.settings
             mongo_uri = settings.get('MONGO_URI', 'mongodb://localhost:27017')

             self.connection = AsyncIOMotorClient(mongo_uri)
             self.database = self.connection[settings.get('MONGO_DATABASE', '{{project_name}}_db')]
             self.collection = self.database[settings.get('MONGO_COLLECTION', '{{project_name}}_items')]

             self.logger.info("Connected to MongoDB")

         except ImportError:
             self.logger.error("Missing motor dependency, install it with: pip install motor")
             raise
         except Exception as e:
             self.logger.error(f"MongoDB connection failed: {e}")
             raise

     async def _create_mysql_table(self, spider):
         """Create the MySQL table schema."""
         table_name = spider.crawler.settings.get('MYSQL_TABLE', '{{project_name}}_data')

         create_sql = f"""
         CREATE TABLE IF NOT EXISTS `{table_name}` (
             `id` bigint AUTO_INCREMENT PRIMARY KEY,
             `title` varchar(500) DEFAULT NULL,
             `url` varchar(1000) NOT NULL,
             `content` text DEFAULT NULL,
             `crawled_at` datetime DEFAULT CURRENT_TIMESTAMP,
             `spider_name` varchar(100) DEFAULT NULL,
             `extra_data` json DEFAULT NULL,
             UNIQUE KEY `unique_url` (`url`(255)),
             INDEX `idx_spider_name` (`spider_name`),
             INDEX `idx_crawled_at` (`crawled_at`)
         ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
         """

         async with self.connection.cursor() as cursor:
             await cursor.execute(create_sql)
         self.logger.info(f"Table '{table_name}' is ready")

     async def process_item(self, item, spider):
         """Process an item (batched storage)."""
         self.batch_items.append(dict(item))

         # Write in batches
         if len(self.batch_items) >= self.batch_size:
             await self._save_batch(spider)

         return item

     async def _save_batch(self, spider):
         """Save a batch of items."""
         if not self.batch_items:
             return

         try:
             if self.database_type == 'mysql':
                 await self._save_to_mysql(spider)
             elif self.database_type == 'mongodb':
                 await self._save_to_mongodb(spider)

             self.logger.info(f"Saved a batch of {len(self.batch_items)} items")
             self.batch_items.clear()

         except Exception as e:
             self.logger.error(f"Batch save failed: {e}")
             # Optionally retry here, or record the failed items

     async def _save_to_mysql(self, spider):
         """Save to MySQL."""
         table_name = spider.crawler.settings.get('MYSQL_TABLE', '{{project_name}}_data')

         insert_sql = f"""
         INSERT INTO `{table_name}`
         (`title`, `url`, `content`, `spider_name`, `extra_data`)
         VALUES (%s, %s, %s, %s, %s)
         ON DUPLICATE KEY UPDATE
         `title` = VALUES(`title`),
         `content` = VALUES(`content`),
         `crawled_at` = CURRENT_TIMESTAMP
         """

         async with self.connection.cursor() as cursor:
             batch_data = []
             for item in self.batch_items:
                 # Extract the standard fields
                 title = item.get('title', '')[:500]  # limit length
                 url = item.get('url', '')
                 content = item.get('content', '')

                 # Store all other fields as JSON
                 extra_fields = {k: v for k, v in item.items()
                                 if k not in ['title', 'url', 'content']}
                 extra_data = json.dumps(extra_fields, ensure_ascii=False) if extra_fields else None

                 batch_data.append((title, url, content, spider.name, extra_data))

             await cursor.executemany(insert_sql, batch_data)

     async def _save_to_mongodb(self, spider):
         """Save to MongoDB."""
         # Tag each item with the spider name
         for item in self.batch_items:
             item['spider_name'] = spider.name

         # Bulk insert
         await self.collection.insert_many(self.batch_items)

     async def close_spider(self, spider):
         """Flush remaining items and close the connection when the spider finishes."""
         # Save any remaining items
         if self.batch_items:
             await self._save_batch(spider)

         # Close the connection
         if self.connection:
             if self.database_type == 'mysql':
                 self.connection.close()
             elif self.database_type == 'mongodb':
                 self.connection.close()

             self.logger.info("Database connection closed")


 # ======================== Usage notes ========================
 #
 # Enable pipelines in settings.py:
 # PIPELINES = [
 #     '{{project_name}}.pipelines.ValidationPipeline',   # data validation
 #     '{{project_name}}.pipelines.DuplicatesPipeline',   # dedup filtering
 #     '{{project_name}}.pipelines.PrintItemPipeline',    # print output (debugging)
 #     '{{project_name}}.pipelines.JsonFilesPipeline',    # JSON file storage
 #     '{{project_name}}.pipelines.DatabasePipeline',     # database storage
 # ]
 #
-#
-#
-#
-#
+# The Crawlo framework ships several built-in dedup pipelines:
+# 1. crawlo.pipelines.MemoryDedupPipeline   - in-memory dedup (default in standalone mode)
+# 2. crawlo.pipelines.RedisDedupPipeline    - Redis dedup (default in distributed mode)
+# 3. crawlo.pipelines.BloomDedupPipeline    - Bloom filter dedup (large-scale data)
+# 4. crawlo.pipelines.DatabaseDedupPipeline - database dedup (persistent)
+#
+# Related settings:
+# JSON_OUTPUT_DIR = 'output'               # JSON file output directory
+# DATABASE_TYPE = 'mysql'                  # database type: mysql/mongodb
+# MYSQL_TABLE = '{{project_name}}_data'    # MySQL table name
 # ======================== Usage notes ========================
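For reference, the settings the template's pipelines read via crawler.settings.get(...) can be collected in one block. A sketch of the corresponding settings.py entries, using the default values visible in the template above; all values are placeholders to adjust:

# settings.py -- sketch; the setting names are the ones the template reads,
# the values shown are the template's own defaults
JSON_OUTPUT_DIR = 'output'        # JsonFilesPipeline output directory
DATABASE_TYPE = 'mysql'           # DatabasePipeline backend: 'mysql' or 'mongodb'
MYSQL_HOST = '127.0.0.1'
MYSQL_PORT = 3306
MYSQL_USER = 'root'
MYSQL_PASSWORD = ''
MYSQL_DB = '{{project_name}}'
MYSQL_TABLE = '{{project_name}}_data'
# MongoDB alternative:
# MONGO_URI = 'mongodb://localhost:27017'
# MONGO_DATABASE = '{{project_name}}_db'
# MONGO_COLLECTION = '{{project_name}}_items'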