crawlo-1.1.2-py3-none-any.whl → crawlo-1.1.4-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__version__.py +1 -1
- crawlo/core/scheduler.py +20 -16
- crawlo/downloader/httpx_downloader.py +14 -12
- crawlo/exceptions.py +4 -0
- crawlo/extension/__init__.py +17 -10
- crawlo/extension/health_check.py +142 -0
- crawlo/extension/log_interval.py +27 -18
- crawlo/extension/log_stats.py +62 -24
- crawlo/extension/logging_extension.py +18 -9
- crawlo/extension/memory_monitor.py +89 -0
- crawlo/extension/performance_profiler.py +118 -0
- crawlo/extension/request_recorder.py +108 -0
- crawlo/filters/aioredis_filter.py +2 -2
- crawlo/middleware/retry.py +3 -3
- crawlo/network/request.py +2 -2
- crawlo/network/response.py +25 -23
- crawlo/pipelines/__init__.py +9 -0
- crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
- crawlo/pipelines/database_dedup_pipeline.py +225 -0
- crawlo/pipelines/memory_dedup_pipeline.py +116 -0
- crawlo/pipelines/mongo_pipeline.py +81 -66
- crawlo/pipelines/mysql_pipeline.py +165 -43
- crawlo/pipelines/redis_dedup_pipeline.py +163 -0
- crawlo/queue/queue_manager.py +4 -0
- crawlo/queue/redis_priority_queue.py +20 -3
- crawlo/settings/default_settings.py +119 -66
- crawlo/subscriber.py +62 -37
- crawlo/templates/project/items.py.tmpl +1 -1
- crawlo/templates/project/middlewares.py.tmpl +73 -49
- crawlo/templates/project/pipelines.py.tmpl +52 -290
- crawlo/templates/project/run.py.tmpl +20 -7
- crawlo/templates/project/settings.py.tmpl +35 -3
- crawlo/templates/spider/spider.py.tmpl +1 -37
- crawlo/utils/controlled_spider_mixin.py +109 -5
- crawlo-1.1.4.dist-info/METADATA +403 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/RECORD +40 -31
- examples/controlled_spider_example.py +205 -0
- crawlo-1.1.2.dist-info/METADATA +0 -567
- {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/WHEEL +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/top_level.txt +0 -0
crawlo/templates/project/pipelines.py.tmpl

@@ -5,332 +5,94 @@
 Data pipelines that process the Items returned by a Spider,
 e.g. cleaning, validation, deduplication, saving to a database.

-
+This is a simple example pipeline; add more pipelines as needed.
 """

 import json
-import asyncio
-from typing import Dict, Any
 from datetime import datetime
 from crawlo.exceptions import DropItem
 from crawlo.utils.log import get_logger


-class
+class ExamplePipeline:
     """
-
+    Example pipeline that demonstrates how to process items.

-
+    This pipeline will:
+    1. Validate required fields
+    2. Clean the data
+    3. Add a timestamp
+    4. Log the processing
     """

     def __init__(self):
         self.logger = get_logger(self.__class__.__name__)
-
-
-
+        self.item_count = 0
+
     def process_item(self, item, spider):
-        """
-
-
-
-
+        """
+        Process one item.
+
+        Args:
+            item: the item to process
+            spider: the spider instance
+
+        Returns:
+            The processed item
+
+        Raises:
+            DropItem: raised when the item is invalid
+        """
+        # Validate required fields
+        if not item.get('title') or not item.get('url'):
+            raise DropItem("Missing required field: title or url")

         # Clean the data
-
-        item['title'] = str(item['title']).strip()
+        item['title'] = str(item['title']).strip()

-        #
-        item['
+        # Add a processing timestamp
+        item['processed_at'] = datetime.now().isoformat()

-
-        return item
-
-
-class PrintItemPipeline:
-    """
-    Simple printing pipeline for debugging.
-    """
-
-    def __init__(self):
-        self.logger = get_logger(self.__class__.__name__)
-        self.item_count = 0
-
-    def process_item(self, item, spider):
-        """Print the item."""
+        # Counter
         self.item_count += 1
-        self.logger.info(f"[item #{self.item_count}] {json.dumps(dict(item), ensure_ascii=False, indent=2)}")
-        return item
-
-
-class DuplicatesPipeline:
-    """
-    Deduplication pipeline.
-
-    Deduplicates on a configurable field to prevent duplicate data.
-    """
-
-    def __init__(self):
-        self.logger = get_logger(self.__class__.__name__)
-        self.seen = set()
-        # Dedup field ('url', 'id', or any other unique identifier)
-        self.duplicate_field = 'url'
-        self.drop_count = 0
-
-    def process_item(self, item, spider):
-        """Check for and drop duplicate items."""
-        identifier = item.get(self.duplicate_field)
-
-        if not identifier:
-            self.logger.warning(f"Item is missing dedup field '{self.duplicate_field}', skipping dedup check")
-            return item

-
-
-            self.logger.debug(f"Duplicate found: {identifier} ({self.drop_count} filtered so far)")
-            raise DropItem(f"Duplicate item: {identifier}")
+        # Log it
+        self.logger.info(f"Processing item #{self.item_count}: {item['title']}")

-        self.seen.add(identifier)
         return item
-
-    def close_spider(self, spider):
-        """Log statistics when the spider finishes."""
-        self.logger.info(f"Dedup pipeline stats: filtered {self.drop_count} duplicates, {len(self.seen)} unique items")

-
-class JsonFilesPipeline:
-    """
-    JSON file storage pipeline.
-
-    Saves each item as a separate JSON file.
-    """
-
-    def __init__(self, output_dir='output'):
-        self.logger = get_logger(self.__class__.__name__)
-        self.output_dir = output_dir
-        self.file_count = 0
-
-    @classmethod
-    def from_crawler(cls, crawler):
-        """Create a pipeline instance from the crawler settings."""
-        output_dir = crawler.settings.get('JSON_OUTPUT_DIR', 'output')
-        return cls(output_dir=output_dir)
-
     def open_spider(self, spider):
-        """
-
-        os.makedirs(self.output_dir, exist_ok=True)
-        self.logger.info(f"JSON files will be saved to: {self.output_dir}")
-
-    def process_item(self, item, spider):
-        """Save the item as a JSON file."""
-        self.file_count += 1
-        filename = f"{spider.name}_{self.file_count:06d}.json"
-        filepath = f"{self.output_dir}/{filename}"
-
-        with open(filepath, 'w', encoding='utf-8') as f:
-            json.dump(dict(item), f, ensure_ascii=False, indent=2)
+        """
+        Called when the spider starts.

-
-
-
+        Args:
+            spider: the spider instance
+        """
+        self.logger.info(f"Pipeline started, ready to process data for spider '{spider.name}'")

-
-    """
-    Example database storage pipeline.
-
-    Supports MySQL and MongoDB; pick whichever you need.
-    """
-
-    def __init__(self, database_type='mysql'):
-        self.logger = get_logger(self.__class__.__name__)
-        self.database_type = database_type
-        self.connection = None
-        self.batch_items = []
-        self.batch_size = 100  # batch write size
-
-    @classmethod
-    def from_crawler(cls, crawler):
-        """Create a pipeline instance from the crawler settings."""
-        db_type = crawler.settings.get('DATABASE_TYPE', 'mysql')
-        return cls(database_type=db_type)
-
-    async def open_spider(self, spider):
-        """Connect to the database when the spider starts."""
-        if self.database_type == 'mysql':
-            await self._connect_mysql(spider)
-        elif self.database_type == 'mongodb':
-            await self._connect_mongodb(spider)
-        else:
-            raise ValueError(f"Unsupported database type: {self.database_type}")
-
-    async def _connect_mysql(self, spider):
-        """Connect to MySQL."""
-        try:
-            import aiomysql
-
-            settings = spider.crawler.settings
-            self.connection = await aiomysql.connect(
-                host=settings.get('MYSQL_HOST', '127.0.0.1'),
-                port=settings.get('MYSQL_PORT', 3306),
-                user=settings.get('MYSQL_USER', 'root'),
-                password=settings.get('MYSQL_PASSWORD', ''),
-                db=settings.get('MYSQL_DB', '{{project_name}}'),
-                charset='utf8mb4',
-                autocommit=True
-            )
-
-            # Create the table if it does not exist
-            await self._create_mysql_table(spider)
-            self.logger.info("Connected to MySQL")
-
-        except ImportError:
-            self.logger.error("Missing aiomysql dependency, install with: pip install aiomysql")
-            raise
-        except Exception as e:
-            self.logger.error(f"MySQL connection failed: {e}")
-            raise
-
-    async def _connect_mongodb(self, spider):
-        """Connect to MongoDB."""
-        try:
-            from motor.motor_asyncio import AsyncIOMotorClient
-
-            settings = spider.crawler.settings
-            mongo_uri = settings.get('MONGO_URI', 'mongodb://localhost:27017')
-
-            self.connection = AsyncIOMotorClient(mongo_uri)
-            self.database = self.connection[settings.get('MONGO_DATABASE', '{{project_name}}_db')]
-            self.collection = self.database[settings.get('MONGO_COLLECTION', '{{project_name}}_items')]
-
-            self.logger.info("Connected to MongoDB")
-
-        except ImportError:
-            self.logger.error("Missing motor dependency, install with: pip install motor")
-            raise
-        except Exception as e:
-            self.logger.error(f"MongoDB connection failed: {e}")
-            raise
-
-    async def _create_mysql_table(self, spider):
-        """Create the MySQL table schema."""
-        table_name = spider.crawler.settings.get('MYSQL_TABLE', '{{project_name}}_data')
-
-        create_sql = f"""
-        CREATE TABLE IF NOT EXISTS `{table_name}` (
-            `id` bigint AUTO_INCREMENT PRIMARY KEY,
-            `title` varchar(500) DEFAULT NULL,
-            `url` varchar(1000) NOT NULL,
-            `content` text DEFAULT NULL,
-            `crawled_at` datetime DEFAULT CURRENT_TIMESTAMP,
-            `spider_name` varchar(100) DEFAULT NULL,
-            `extra_data` json DEFAULT NULL,
-            UNIQUE KEY `unique_url` (`url`(255)),
-            INDEX `idx_spider_name` (`spider_name`),
-            INDEX `idx_crawled_at` (`crawled_at`)
-        ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+    def close_spider(self, spider):
         """
+        Called when the spider closes.

-
-
-        self.logger.info(f"Table '{table_name}' is ready")
-
-    async def process_item(self, item, spider):
-        """Process an item (batched storage)."""
-        self.batch_items.append(dict(item))
-
-        # Save in batches
-        if len(self.batch_items) >= self.batch_size:
-            await self._save_batch(spider)
-
-        return item
-
-    async def _save_batch(self, spider):
-        """Save a batch of items."""
-        if not self.batch_items:
-            return
-
-        try:
-            if self.database_type == 'mysql':
-                await self._save_to_mysql(spider)
-            elif self.database_type == 'mongodb':
-                await self._save_to_mongodb(spider)
-
-            self.logger.info(f"Saved a batch of {len(self.batch_items)} items")
-            self.batch_items.clear()
-
-        except Exception as e:
-            self.logger.error(f"Batch save failed: {e}")
-            # Optionally retry or record the failed items here
-
-    async def _save_to_mysql(self, spider):
-        """Save to MySQL."""
-        table_name = spider.crawler.settings.get('MYSQL_TABLE', '{{project_name}}_data')
-
-        insert_sql = f"""
-        INSERT INTO `{table_name}`
-        (`title`, `url`, `content`, `spider_name`, `extra_data`)
-        VALUES (%s, %s, %s, %s, %s)
-        ON DUPLICATE KEY UPDATE
-        `title` = VALUES(`title`),
-        `content` = VALUES(`content`),
-        `crawled_at` = CURRENT_TIMESTAMP
+        Args:
+            spider: the spider instance
         """
-
-        async with self.connection.cursor() as cursor:
-            batch_data = []
-            for item in self.batch_items:
-                # Extract the standard fields
-                title = item.get('title', '')[:500]  # limit length
-                url = item.get('url', '')
-                content = item.get('content', '')
-
-                # Store the remaining fields as JSON
-                extra_fields = {k: v for k, v in item.items()
-                                if k not in ['title', 'url', 'content']}
-                extra_data = json.dumps(extra_fields, ensure_ascii=False) if extra_fields else None
-
-                batch_data.append((title, url, content, spider.name, extra_data))
-
-            await cursor.executemany(insert_sql, batch_data)
-
-    async def _save_to_mongodb(self, spider):
-        """Save to MongoDB."""
-        # Tag each item with the spider name
-        for item in self.batch_items:
-            item['spider_name'] = spider.name
-
-        # Bulk insert
-        await self.collection.insert_many(self.batch_items)
-
-    async def close_spider(self, spider):
-        """Save any remaining data and close the connection when the spider finishes."""
-        # Save remaining data
-        if self.batch_items:
-            await self._save_batch(spider)
-
-        # Close the connection
-        if self.connection:
-            if self.database_type == 'mysql':
-                self.connection.close()
-            elif self.database_type == 'mongodb':
-                self.connection.close()
-
-        self.logger.info("Database connection closed")
+        self.logger.info(f"Pipeline closed, processed {self.item_count} items in total")


 # ======================== Usage ========================
 #
 # Enable pipelines in settings.py:
 # PIPELINES = [
-#     '{{project_name}}.pipelines.
-#     '{{project_name}}.pipelines.DuplicatesPipeline',   # dedup filter
-#     '{{project_name}}.pipelines.PrintItemPipeline',    # print output (debugging)
-#     '{{project_name}}.pipelines.JsonFilesPipeline',    # JSON file storage
-#     '{{project_name}}.pipelines.DatabasePipeline',     # database storage
+#     '{{project_name}}.pipelines.ExamplePipeline',
 # ]
 #
-#
-#
-#
-#
+# Add more pipelines as needed, for example:
+# 1. Data validation pipelines
+# 2. Deduplication pipelines
+# 3. Storage pipelines (database, files, etc.)
+# 4. Data transformation pipelines
+#
+# Each pipeline should implement the process_item method,
+# and may optionally implement open_spider and close_spider.
 # ======================== Usage ========================
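For orientation, a minimal custom pipeline that follows the contract described in the template's usage notes (a required `process_item`, optional `open_spider`/`close_spider`, and `DropItem` to discard an item) might look like the sketch below. The class name `UrlDedupPipeline` and the `url` field are illustrative assumptions, not part of the package; only `DropItem` and `get_logger` come from the diff above:

```python
# Minimal sketch of a custom pipeline following the template's contract.
# UrlDedupPipeline and the 'url' field are illustrative assumptions.
from crawlo.exceptions import DropItem
from crawlo.utils.log import get_logger


class UrlDedupPipeline:
    def __init__(self):
        self.logger = get_logger(self.__class__.__name__)
        self.seen = set()  # in-memory dedup state, reset on every run

    def process_item(self, item, spider):
        url = item.get('url')
        if not url:
            raise DropItem("item has no 'url' field")
        if url in self.seen:
            raise DropItem(f"duplicate url: {url}")
        self.seen.add(url)
        return item

    def close_spider(self, spider):
        self.logger.info(f"kept {len(self.seen)} unique items")
```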
crawlo/templates/project/run.py.tmpl

@@ -10,6 +10,7 @@
     python run.py spider_name                   # run in standalone mode
     python run.py spider_name --distributed     # run in distributed mode
     python run.py spider_name --env production  # use a preset configuration
+    python run.py all                           # run all spiders

 🔧 Advanced options:
     python run.py spider_name --dry-run         # dry run (no actual crawling)
@@ -50,6 +51,7 @@ def create_parser():
     python run.py my_spider --distributed     # distributed mode
     python run.py my_spider --env production  # production configuration
     python run.py spider1 spider2             # run multiple spiders
+    python run.py all                         # run all spiders
     python run.py my_spider --dry-run         # test mode
 """
 )
@@ -58,7 +60,7 @@ def create_parser():
     parser.add_argument(
         'spiders',
         nargs='*',
-        help='
+        help='Spider name(s) to run (multiple allowed; "all" runs every spider)'
     )

     # Run mode selection
@@ -212,12 +214,23 @@ async def main():
     # Apply the configuration and start
     process = CrawlerProcess(settings=config.to_dict())

-    #
-
-
-
-
-
+    # Check whether all spiders should be run
+    if 'all' in [s.lower() for s in args.spiders]:
+        # Get the names of all registered spiders
+        spider_names = process.get_spider_names()
+        if not spider_names:
+            print("❌ No spiders found")
+            print("💡 Please make sure that:")
+            print("   • spiders are defined in the 'spiders/' directory")
+            print("   • spider classes have a 'name' attribute")
+            return 1
+
+        print(f"📋 Found {len(spider_names)} spiders: {', '.join(spider_names)}")
+        # Run all spiders
+        await process.crawl(spider_names)
+    else:
+        # Run the specified spiders
+        await process.crawl(args.spiders)

     print("\n✅ All spiders finished")

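Condensed, the new branch reduces to the sketch below. It assumes only the `CrawlerProcess` methods that appear in this diff (`get_spider_names()` and `crawl()`); the import path is an assumption:

```python
# Condensed sketch of the new "all" dispatch in run.py.tmpl.
# CrawlerProcess import path is assumed; its methods are from the diff.
import asyncio

from crawlo.crawler import CrawlerProcess  # assumed import path


async def run(spider_args):
    process = CrawlerProcess()
    if 'all' in [s.lower() for s in spider_args]:
        targets = process.get_spider_names()  # every registered spider
    else:
        targets = spider_args
    await process.crawl(targets)


if __name__ == '__main__':
    asyncio.run(run(['all']))
```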
crawlo/templates/project/settings.py.tmpl

@@ -64,9 +64,9 @@ locals().update(CONFIG.to_dict())
 # ============================== Network request configuration ==============================

 # Downloader selection (CurlCffi recommended; supports browser fingerprint emulation)
-DOWNLOADER = "crawlo.downloader.
+DOWNLOADER = "crawlo.downloader.httpx_downloader.HttpXDownloader"  # HTTP/2 support
+# DOWNLOADER = "crawlo.downloader.cffi_downloader.CurlCffiDownloader"  # browser fingerprinting
 # DOWNLOADER = "crawlo.downloader.aiohttp_downloader.AioHttpDownloader"  # lightweight option
-# DOWNLOADER = "crawlo.downloader.httpx_downloader.HttpXDownloader"  # HTTP/2 support

 # Request timeout and safety
 DOWNLOAD_TIMEOUT = 30
@@ -120,6 +120,7 @@ MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', '123456')
 MYSQL_DB = os.getenv('MYSQL_DB', '{{project_name}}')
 MYSQL_TABLE = '{{project_name}}_data'
 MYSQL_BATCH_SIZE = 100
+MYSQL_USE_BATCH = False  # whether to enable batched inserts

 # MySQL connection pool
 MYSQL_FLUSH_INTERVAL = 5
@@ -129,10 +130,12 @@ MYSQL_ECHO = False

 # --- MongoDB configuration ---
 MONGO_URI = os.getenv('MONGO_URI', 'mongodb://localhost:27017')
-MONGO_DATABASE =
+MONGO_DATABASE = '{{project_name}}_db'
 MONGO_COLLECTION = '{{project_name}}_items'
 MONGO_MAX_POOL_SIZE = 200
 MONGO_MIN_POOL_SIZE = 20
+MONGO_BATCH_SIZE = 100  # documents per batched insert
+MONGO_USE_BATCH = False  # whether to enable batched inserts

 # ============================== Deduplication filter configuration ==============================

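Both new toggles follow the same pattern: with `*_USE_BATCH` off, each item is written through immediately; with it on, writes are buffered up to `*_BATCH_SIZE` and flushed as a batch. A generic sketch of that behavior (an illustration, not crawlo's actual pipeline internals):

```python
# Generic write-through vs. batched behavior behind settings such as
# MYSQL_USE_BATCH/MYSQL_BATCH_SIZE and MONGO_USE_BATCH/MONGO_BATCH_SIZE.
# Illustrative only; crawlo's pipelines may differ.
from typing import Any, Callable, List


class BatchBuffer:
    def __init__(self, use_batch: bool, batch_size: int,
                 flush: Callable[[List[Any]], None]):
        self.use_batch = use_batch
        self.batch_size = batch_size
        self.flush = flush  # e.g. a wrapper around executemany()/insert_many()
        self.buffer: List[Any] = []

    def add(self, doc: Any) -> None:
        if not self.use_batch:
            self.flush([doc])  # write through immediately
            return
        self.buffer.append(doc)
        if len(self.buffer) >= self.batch_size:
            self.flush(self.buffer)
            self.buffer = []

    def close(self) -> None:
        if self.buffer:  # flush any partial batch on shutdown
            self.flush(self.buffer)
            self.buffer = []
```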
@@ -177,6 +180,9 @@ MIDDLEWARES = [
 # ============================== Data pipeline configuration ==============================

 PIPELINES = [
+    # A default dedup pipeline is selected automatically from the run mode:
+    # standalone mode: crawlo.pipelines.MemoryDedupPipeline
+    # distributed mode: crawlo.pipelines.RedisDedupPipeline
     'crawlo.pipelines.console_pipeline.ConsolePipeline',
     # '{{project_name}}.pipelines.DatabasePipeline',  # custom database pipeline
     # 'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage
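The comment above describes a mode-dependent default; presumably the selection reduces to something like this sketch (the framework performs it internally, so this is illustration only):

```python
# Sketch of the mode-based dedup default described in the comment above;
# the actual selection happens inside the framework.
def default_dedup_pipeline(distributed: bool) -> str:
    if distributed:
        return 'crawlo.pipelines.RedisDedupPipeline'   # state shared via Redis
    return 'crawlo.pipelines.MemoryDedupPipeline'      # per-process state
```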
@@ -189,8 +195,34 @@ EXTENSIONS = [
     'crawlo.extension.log_interval.LogIntervalExtension',
     'crawlo.extension.log_stats.LogStats',
     'crawlo.extension.logging_extension.CustomLoggerExtension',
+    # 'crawlo.extension.memory_monitor.MemoryMonitorExtension',  # memory monitoring
+    # 'crawlo.extension.request_recorder.RequestRecorderExtension',  # request recording
+    # 'crawlo.extension.performance_profiler.PerformanceProfilerExtension',  # performance profiling
+    # 'crawlo.extension.health_check.HealthCheckExtension',  # health checks
 ]

+# ============================== Extension configuration ==============================
+
+# Memory monitor extension
+# MEMORY_MONITOR_ENABLED = True  # whether to enable memory monitoring
+# MEMORY_MONITOR_INTERVAL = 60  # memory check interval (seconds)
+# MEMORY_WARNING_THRESHOLD = 80.0  # memory usage warning threshold (percent)
+# MEMORY_CRITICAL_THRESHOLD = 90.0  # memory usage critical threshold (percent)
+
+# Request recorder extension
+# REQUEST_RECORDER_ENABLED = True  # whether to enable request recording
+# REQUEST_RECORDER_OUTPUT_DIR = 'requests_log'  # output directory for request logs
+# REQUEST_RECORDER_MAX_FILE_SIZE = 10 * 1024 * 1024  # maximum size of a single log file (bytes)
+
+# Performance profiler extension
+# PERFORMANCE_PROFILER_ENABLED = True  # whether to enable performance profiling
+# PERFORMANCE_PROFILER_OUTPUT_DIR = 'profiling'  # output directory for profiling results
+# PERFORMANCE_PROFILER_INTERVAL = 300  # interval between saved profiling snapshots (seconds)
+
+# Health check extension
+# HEALTH_CHECK_ENABLED = True  # whether to enable health checks
+# HEALTH_CHECK_INTERVAL = 60  # health check interval (seconds)
+
 # ============================== Logging configuration ==============================

 LOG_LEVEL = 'INFO'
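For a sense of what the memory-monitor settings control, a generic threshold-based monitor loop looks like the sketch below. This is an illustration using psutil, not crawlo's actual `memory_monitor` extension code:

```python
# Generic threshold-based memory monitor of the shape these settings
# configure; illustrative only, not crawlo's implementation.
import asyncio

import psutil


async def monitor_memory(interval: int = 60,
                         warning: float = 80.0,
                         critical: float = 90.0) -> None:
    while True:
        percent = psutil.virtual_memory().percent  # system-wide usage in %
        if percent >= critical:
            print(f"CRITICAL: memory usage at {percent:.1f}%")
        elif percent >= warning:
            print(f"WARNING: memory usage at {percent:.1f}%")
        await asyncio.sleep(interval)
```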
crawlo/templates/spider/spider.py.tmpl

@@ -139,40 +139,4 @@ class {{class_name}}(Spider):
         #     'publish_time': response.xpath('//time/@datetime').get(),
         # }

-        pass
-
-    def parse_error(self, failure):
-        """
-        Optional handler for failed requests.
-
-        Called when a request fails.
-        """
-        self.logger.error(f'Request failed: {failure.request.url} - {failure.value}')
-
-        # You may retry or record the failure here
-        # yield {
-        #     'error_url': failure.request.url,
-        #     'error_message': str(failure.value),
-        #     'error_type': failure.type.__name__,
-        # }
-
-    def spider_opened(self, spider):
-        """
-        Optional callback invoked when the spider starts.
-        """
-        self.logger.info(f'Spider {spider.name} started')
-
-        # Initialization, e.g. connect to a database, load configuration
-        # self.database = self.connect_database()
-        # self.cookies = self.load_cookies()
-
-    def spider_closed(self, spider, reason):
-        """
-        Optional callback invoked when the spider closes.
-        """
-        self.logger.info(f'Spider {spider.name} closed, reason: {reason}')
-
-        # Cleanup, e.g. close database connections, save state
-        # if hasattr(self, 'database'):
-        #     self.database.close()
-        # self.save_cookies()
+        pass
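With those optional callbacks removed, the template now ends at a bare `parse`. A filled-in spider based on the remaining skeleton might look like the sketch below; the base-class import path, `start_urls` attribute, spider name, and selectors are illustrative assumptions:

```python
# Filled-in sketch of the trimmed spider template; import path, name,
# start_urls, and selectors are assumptions for illustration.
from crawlo import Spider  # assumed import path


class ExampleSpider(Spider):
    name = 'example'
    start_urls = ['https://example.com/']  # assumed attribute

    def parse(self, response):
        # Yield one item per page; a pipeline can then validate title/url.
        yield {
            'title': response.xpath('//h1/text()').get(),
            'url': response.url,
        }
```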