crawlo-1.1.3-py3-none-any.whl → crawlo-1.1.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (115)
  1. crawlo/__init__.py +28 -1
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +61 -0
  4. crawlo/cleaners/data_formatter.py +226 -0
  5. crawlo/cleaners/encoding_converter.py +126 -0
  6. crawlo/cleaners/text_cleaner.py +233 -0
  7. crawlo/commands/startproject.py +117 -13
  8. crawlo/config.py +30 -0
  9. crawlo/config_validator.py +253 -0
  10. crawlo/core/engine.py +185 -11
  11. crawlo/core/scheduler.py +49 -78
  12. crawlo/crawler.py +6 -6
  13. crawlo/downloader/__init__.py +24 -0
  14. crawlo/downloader/aiohttp_downloader.py +8 -0
  15. crawlo/downloader/cffi_downloader.py +5 -0
  16. crawlo/downloader/hybrid_downloader.py +214 -0
  17. crawlo/downloader/playwright_downloader.py +403 -0
  18. crawlo/downloader/selenium_downloader.py +473 -0
  19. crawlo/extension/__init__.py +17 -10
  20. crawlo/extension/health_check.py +142 -0
  21. crawlo/extension/log_interval.py +27 -18
  22. crawlo/extension/log_stats.py +62 -24
  23. crawlo/extension/logging_extension.py +18 -9
  24. crawlo/extension/memory_monitor.py +105 -0
  25. crawlo/extension/performance_profiler.py +134 -0
  26. crawlo/extension/request_recorder.py +108 -0
  27. crawlo/filters/aioredis_filter.py +50 -12
  28. crawlo/middleware/proxy.py +26 -2
  29. crawlo/mode_manager.py +24 -19
  30. crawlo/network/request.py +30 -3
  31. crawlo/network/response.py +114 -25
  32. crawlo/pipelines/mongo_pipeline.py +81 -66
  33. crawlo/pipelines/mysql_pipeline.py +165 -43
  34. crawlo/pipelines/redis_dedup_pipeline.py +7 -3
  35. crawlo/queue/queue_manager.py +15 -2
  36. crawlo/queue/redis_priority_queue.py +144 -76
  37. crawlo/settings/default_settings.py +93 -121
  38. crawlo/subscriber.py +62 -37
  39. crawlo/templates/project/items.py.tmpl +1 -1
  40. crawlo/templates/project/middlewares.py.tmpl +73 -49
  41. crawlo/templates/project/pipelines.py.tmpl +51 -295
  42. crawlo/templates/project/settings.py.tmpl +93 -17
  43. crawlo/templates/project/settings_distributed.py.tmpl +120 -0
  44. crawlo/templates/project/settings_gentle.py.tmpl +95 -0
  45. crawlo/templates/project/settings_high_performance.py.tmpl +152 -0
  46. crawlo/templates/project/settings_simple.py.tmpl +69 -0
  47. crawlo/templates/spider/spider.py.tmpl +2 -38
  48. crawlo/tools/__init__.py +183 -0
  49. crawlo/tools/anti_crawler.py +269 -0
  50. crawlo/tools/authenticated_proxy.py +241 -0
  51. crawlo/tools/data_validator.py +181 -0
  52. crawlo/tools/date_tools.py +36 -0
  53. crawlo/tools/distributed_coordinator.py +387 -0
  54. crawlo/tools/retry_mechanism.py +221 -0
  55. crawlo/tools/scenario_adapter.py +263 -0
  56. crawlo/utils/__init__.py +29 -1
  57. crawlo/utils/batch_processor.py +261 -0
  58. crawlo/utils/date_tools.py +58 -1
  59. crawlo/utils/enhanced_error_handler.py +360 -0
  60. crawlo/utils/env_config.py +106 -0
  61. crawlo/utils/error_handler.py +126 -0
  62. crawlo/utils/performance_monitor.py +285 -0
  63. crawlo/utils/redis_connection_pool.py +335 -0
  64. crawlo/utils/redis_key_validator.py +200 -0
  65. crawlo-1.1.5.dist-info/METADATA +401 -0
  66. crawlo-1.1.5.dist-info/RECORD +185 -0
  67. tests/advanced_tools_example.py +276 -0
  68. tests/authenticated_proxy_example.py +237 -0
  69. tests/cleaners_example.py +161 -0
  70. tests/config_validation_demo.py +103 -0
  71. tests/date_tools_example.py +181 -0
  72. tests/dynamic_loading_example.py +524 -0
  73. tests/dynamic_loading_test.py +105 -0
  74. tests/env_config_example.py +134 -0
  75. tests/error_handling_example.py +172 -0
  76. tests/redis_key_validation_demo.py +131 -0
  77. tests/response_improvements_example.py +145 -0
  78. tests/test_advanced_tools.py +149 -0
  79. tests/test_all_redis_key_configs.py +146 -0
  80. tests/test_authenticated_proxy.py +142 -0
  81. tests/test_cleaners.py +55 -0
  82. tests/test_comprehensive.py +147 -0
  83. tests/test_config_validator.py +194 -0
  84. tests/test_date_tools.py +124 -0
  85. tests/test_dynamic_downloaders_proxy.py +125 -0
  86. tests/test_dynamic_proxy.py +93 -0
  87. tests/test_dynamic_proxy_config.py +147 -0
  88. tests/test_dynamic_proxy_real.py +110 -0
  89. tests/test_edge_cases.py +304 -0
  90. tests/test_enhanced_error_handler.py +271 -0
  91. tests/test_env_config.py +122 -0
  92. tests/test_error_handler_compatibility.py +113 -0
  93. tests/test_framework_env_usage.py +104 -0
  94. tests/test_integration.py +357 -0
  95. tests/test_item_dedup_redis_key.py +123 -0
  96. tests/test_parsel.py +30 -0
  97. tests/test_performance.py +328 -0
  98. tests/test_queue_manager_redis_key.py +177 -0
  99. tests/test_redis_connection_pool.py +295 -0
  100. tests/test_redis_key_naming.py +182 -0
  101. tests/test_redis_key_validator.py +124 -0
  102. tests/test_response_improvements.py +153 -0
  103. tests/test_simple_response.py +62 -0
  104. tests/test_telecom_spider_redis_key.py +206 -0
  105. tests/test_template_content.py +88 -0
  106. tests/test_template_redis_key.py +135 -0
  107. tests/test_tools.py +154 -0
  108. tests/tools_example.py +258 -0
  109. crawlo/core/enhanced_engine.py +0 -190
  110. crawlo-1.1.3.dist-info/METADATA +0 -635
  111. crawlo-1.1.3.dist-info/RECORD +0 -113
  112. {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/WHEEL +0 -0
  113. {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/entry_points.txt +0 -0
  114. {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/top_level.txt +0 -0
  115. {examples → tests}/controlled_spider_example.py +0 -0
crawlo/templates/project/pipelines.py.tmpl

@@ -5,338 +5,94 @@
  Data pipelines that process the Items returned by a Spider,
  e.g. cleaning, validation, deduplication, saving to a database.

- Supports asynchronous concurrent processing and multiple storage backends.
+ This is a simple example pipeline; add more pipelines as you need them.
  """

  import json
- import asyncio
- from typing import Dict, Any
  from datetime import datetime
  from crawlo.exceptions import DropItem
  from crawlo.utils.log import get_logger


- class ValidationPipeline:
+ class ExamplePipeline:
      """
-     Data validation pipeline.
+     Example pipeline that demonstrates how to process items.

-     Checks that required fields are present and filters out invalid data.
+     This pipeline:
+     1. validates required fields
+     2. cleans the data
+     3. adds a timestamp
+     4. logs every processed item
      """

      def __init__(self):
          self.logger = get_logger(self.__class__.__name__)
-         # Required fields (adjust to your needs)
-         self.required_fields = ['title', 'url']
-
+         self.item_count = 0
+
      def process_item(self, item, spider):
-         """Validate an item."""
-         # Check required fields
-         for field in self.required_fields:
-             if not item.get(field):
-                 raise DropItem(f"Missing required field: {field}")
+         """
+         Process an item.
+
+         Args:
+             item: the item to process
+             spider: the spider instance
+
+         Returns:
+             The processed item.
+
+         Raises:
+             DropItem: if the item is invalid
+         """
+         # Validate required fields
+         if not item.get('title') or not item.get('url'):
+             raise DropItem("Missing required field: title or url")

          # Clean the data
-         if 'title' in item:
-             item['title'] = str(item['title']).strip()
+         item['title'] = str(item['title']).strip()

-         # Add a timestamp
-         item['crawled_at'] = datetime.now().isoformat()
+         # Add a processing timestamp
+         item['processed_at'] = datetime.now().isoformat()

-         self.logger.debug(f"Validation passed: {item.get('url', 'Unknown URL')}")
-         return item
-
-
- class PrintItemPipeline:
-     """
-     Simple print pipeline for debugging.
-     """
-
-     def __init__(self):
-         self.logger = get_logger(self.__class__.__name__)
-         self.item_count = 0
-
-     def process_item(self, item, spider):
-         """Print an item."""
+         # Counter
          self.item_count += 1
-         self.logger.info(f"[Item #{self.item_count}] {json.dumps(dict(item), ensure_ascii=False, indent=2)}")
-         return item
-
-
- class DuplicatesPipeline:
-     """
-     Deduplication pipeline.
-
-     Deduplicates on a configurable field to avoid duplicate data.
-     """
-
-     def __init__(self):
-         self.logger = get_logger(self.__class__.__name__)
-         self.seen = set()
-         # Dedup field ('url', 'id', or any other unique identifier)
-         self.duplicate_field = 'url'
-         self.drop_count = 0
-
-     def process_item(self, item, spider):
-         """Check for and drop duplicates."""
-         identifier = item.get(self.duplicate_field)

-         if not identifier:
-             self.logger.warning(f"Item is missing dedup field '{self.duplicate_field}', skipping dedup check")
-             return item
+         # Log progress
+         self.logger.info(f"Processed item #{self.item_count}: {item['title']}")

-         if identifier in self.seen:
-             self.drop_count += 1
-             self.logger.debug(f"Duplicate found: {identifier} ({self.drop_count} dropped so far)")
-             raise DropItem(f"Duplicate item: {identifier}")
-
-         self.seen.add(identifier)
          return item
-
-     def close_spider(self, spider):
-         """Log statistics when the spider finishes."""
-         self.logger.info(f"Dedup pipeline stats: {self.drop_count} duplicates dropped, {len(self.seen)} unique items")
-

- class JsonFilesPipeline:
-     """
-     JSON file storage pipeline.
-
-     Saves each item as a separate JSON file.
-     """
-
-     def __init__(self, output_dir='output'):
-         self.logger = get_logger(self.__class__.__name__)
-         self.output_dir = output_dir
-         self.file_count = 0
-
-     @classmethod
-     def from_crawler(cls, crawler):
-         """Create the pipeline instance from crawler settings."""
-         output_dir = crawler.settings.get('JSON_OUTPUT_DIR', 'output')
-         return cls(output_dir=output_dir)
-
      def open_spider(self, spider):
-         """Create the output directory when the spider starts."""
-         import os
-         os.makedirs(self.output_dir, exist_ok=True)
-         self.logger.info(f"JSON files will be saved to: {self.output_dir}")
-
-     def process_item(self, item, spider):
-         """Save an item as a JSON file."""
-         self.file_count += 1
-         filename = f"{spider.name}_{self.file_count:06d}.json"
-         filepath = f"{self.output_dir}/{filename}"
-
-         with open(filepath, 'w', encoding='utf-8') as f:
-             json.dump(dict(item), f, ensure_ascii=False, indent=2)
+         """
+         Called when the spider starts.

-         self.logger.debug(f"Saved: {filepath}")
-         return item
-
+         Args:
+             spider: the spider instance
+         """
+         self.logger.info(f"Pipeline started, ready to process data for spider '{spider.name}'")

- class DatabasePipeline:
-     """
-     Example database storage pipeline.
-
-     Supports MySQL and MongoDB; choose whichever you need.
-     """
-
-     def __init__(self, database_type='mysql'):
-         self.logger = get_logger(self.__class__.__name__)
-         self.database_type = database_type
-         self.connection = None
-         self.batch_items = []
-         self.batch_size = 100  # batch write size
-
-     @classmethod
-     def from_crawler(cls, crawler):
-         """Create the pipeline instance from crawler settings."""
-         db_type = crawler.settings.get('DATABASE_TYPE', 'mysql')
-         return cls(database_type=db_type)
-
-     async def open_spider(self, spider):
-         """Connect to the database when the spider starts."""
-         if self.database_type == 'mysql':
-             await self._connect_mysql(spider)
-         elif self.database_type == 'mongodb':
-             await self._connect_mongodb(spider)
-         else:
-             raise ValueError(f"Unsupported database type: {self.database_type}")
-
-     async def _connect_mysql(self, spider):
-         """Connect to MySQL."""
-         try:
-             import aiomysql
-
-             settings = spider.crawler.settings
-             self.connection = await aiomysql.connect(
-                 host=settings.get('MYSQL_HOST', '127.0.0.1'),
-                 port=settings.get('MYSQL_PORT', 3306),
-                 user=settings.get('MYSQL_USER', 'root'),
-                 password=settings.get('MYSQL_PASSWORD', ''),
-                 db=settings.get('MYSQL_DB', '{{project_name}}'),
-                 charset='utf8mb4',
-                 autocommit=True
-             )
-
-             # Create the table if it does not exist
-             await self._create_mysql_table(spider)
-             self.logger.info("Connected to MySQL")
-
-         except ImportError:
-             self.logger.error("Missing aiomysql dependency, install it with: pip install aiomysql")
-             raise
-         except Exception as e:
-             self.logger.error(f"MySQL connection failed: {e}")
-             raise
-
-     async def _connect_mongodb(self, spider):
-         """Connect to MongoDB."""
-         try:
-             from motor.motor_asyncio import AsyncIOMotorClient
-
-             settings = spider.crawler.settings
-             mongo_uri = settings.get('MONGO_URI', 'mongodb://localhost:27017')
-
-             self.connection = AsyncIOMotorClient(mongo_uri)
-             self.database = self.connection[settings.get('MONGO_DATABASE', '{{project_name}}_db')]
-             self.collection = self.database[settings.get('MONGO_COLLECTION', '{{project_name}}_items')]
-
-             self.logger.info("Connected to MongoDB")
-
-         except ImportError:
-             self.logger.error("Missing motor dependency, install it with: pip install motor")
-             raise
-         except Exception as e:
-             self.logger.error(f"MongoDB connection failed: {e}")
-             raise
-
-     async def _create_mysql_table(self, spider):
-         """Create the MySQL table schema."""
-         table_name = spider.crawler.settings.get('MYSQL_TABLE', '{{project_name}}_data')
-
-         create_sql = f"""
-         CREATE TABLE IF NOT EXISTS `{table_name}` (
-             `id` bigint AUTO_INCREMENT PRIMARY KEY,
-             `title` varchar(500) DEFAULT NULL,
-             `url` varchar(1000) NOT NULL,
-             `content` text DEFAULT NULL,
-             `crawled_at` datetime DEFAULT CURRENT_TIMESTAMP,
-             `spider_name` varchar(100) DEFAULT NULL,
-             `extra_data` json DEFAULT NULL,
-             UNIQUE KEY `unique_url` (`url`(255)),
-             INDEX `idx_spider_name` (`spider_name`),
-             INDEX `idx_crawled_at` (`crawled_at`)
-         ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+     def close_spider(self, spider):
          """
+         Called when the spider closes.

-         async with self.connection.cursor() as cursor:
-             await cursor.execute(create_sql)
-         self.logger.info(f"Table '{table_name}' is ready")
-
-     async def process_item(self, item, spider):
-         """Process an item (batched storage)."""
-         self.batch_items.append(dict(item))
-
-         # Flush in batches
-         if len(self.batch_items) >= self.batch_size:
-             await self._save_batch(spider)
-
-         return item
-
-     async def _save_batch(self, spider):
-         """Save one batch of items."""
-         if not self.batch_items:
-             return
-
-         try:
-             if self.database_type == 'mysql':
-                 await self._save_to_mysql(spider)
-             elif self.database_type == 'mongodb':
-                 await self._save_to_mongodb(spider)
-
-             self.logger.info(f"Saved a batch of {len(self.batch_items)} items")
-             self.batch_items.clear()
-
-         except Exception as e:
-             self.logger.error(f"Batch save failed: {e}")
-             # Optionally retry or record the failed items
-
-     async def _save_to_mysql(self, spider):
-         """Save to MySQL."""
-         table_name = spider.crawler.settings.get('MYSQL_TABLE', '{{project_name}}_data')
-
-         insert_sql = f"""
-         INSERT INTO `{table_name}`
-         (`title`, `url`, `content`, `spider_name`, `extra_data`)
-         VALUES (%s, %s, %s, %s, %s)
-         ON DUPLICATE KEY UPDATE
-         `title` = VALUES(`title`),
-         `content` = VALUES(`content`),
-         `crawled_at` = CURRENT_TIMESTAMP
+         Args:
+             spider: the spider instance
          """
-
-         async with self.connection.cursor() as cursor:
-             batch_data = []
-             for item in self.batch_items:
-                 # Extract the standard fields
-                 title = item.get('title', '')[:500]  # limit the length
-                 url = item.get('url', '')
-                 content = item.get('content', '')
-
-                 # Store the remaining fields as JSON
-                 extra_fields = {k: v for k, v in item.items()
-                                 if k not in ['title', 'url', 'content']}
-                 extra_data = json.dumps(extra_fields, ensure_ascii=False) if extra_fields else None
-
-                 batch_data.append((title, url, content, spider.name, extra_data))
-
-             await cursor.executemany(insert_sql, batch_data)
-
-     async def _save_to_mongodb(self, spider):
-         """Save to MongoDB."""
-         # Tag every item with the spider name
-         for item in self.batch_items:
-             item['spider_name'] = spider.name
-
-         # Bulk insert
-         await self.collection.insert_many(self.batch_items)
-
-     async def close_spider(self, spider):
-         """Save any remaining items and close the connection when the spider finishes."""
-         # Save remaining items
-         if self.batch_items:
-             await self._save_batch(spider)
-
-         # Close the connection
-         if self.connection:
-             if self.database_type == 'mysql':
-                 self.connection.close()
-             elif self.database_type == 'mongodb':
-                 self.connection.close()
-
-             self.logger.info("Database connection closed")
+         self.logger.info(f"Pipeline closed, processed {self.item_count} items in total")


  # ======================== Usage notes ========================
  #
  # Enable pipelines in settings.py:
  # PIPELINES = [
- #     '{{project_name}}.pipelines.ValidationPipeline',   # data validation
- #     '{{project_name}}.pipelines.DuplicatesPipeline',   # deduplication
- #     '{{project_name}}.pipelines.PrintItemPipeline',    # print output (debugging)
- #     '{{project_name}}.pipelines.JsonFilesPipeline',    # JSON file storage
- #     '{{project_name}}.pipelines.DatabasePipeline',     # database storage
+ #     '{{project_name}}.pipelines.ExamplePipeline',
  # ]
  #
- # Crawlo ships several built-in dedup pipelines:
- # 1. crawlo.pipelines.MemoryDedupPipeline - in-memory dedup (standalone default)
- # 2. crawlo.pipelines.RedisDedupPipeline - Redis dedup (distributed default)
- # 3. crawlo.pipelines.BloomDedupPipeline - Bloom filter dedup (large-scale data)
- # 4. crawlo.pipelines.DatabaseDedupPipeline - database dedup (persistent)
+ # Add more pipelines as needed, for example:
+ # 1. a data validation pipeline
+ # 2. a deduplication pipeline
+ # 3. a storage pipeline (database, files, ...)
+ # 4. a data transformation pipeline
  #
- # Related settings:
- # JSON_OUTPUT_DIR = 'output'             # JSON output directory
- # DATABASE_TYPE = 'mysql'                # database type: mysql/mongodb
- # MYSQL_TABLE = '{{project_name}}_data'  # MySQL table name
+ # Each pipeline must implement process_item and may optionally
+ # implement open_spider and close_spider.
  # ======================== Usage notes ========================
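For orientation, the contract spelled out in the usage notes above (process_item is required, open_spider and close_spider are optional, and raising DropItem discards an item) would be met by a small custom pipeline along the following lines. This is a sketch only: the module path my_project.pipelines and the price field are hypothetical, while DropItem and get_logger are the same imports the template already uses.

from datetime import datetime

from crawlo.exceptions import DropItem
from crawlo.utils.log import get_logger


class PriceNormalizerPipeline:
    """Normalize a 'price' field and stamp each item (illustrative sketch)."""

    def __init__(self):
        self.logger = get_logger(self.__class__.__name__)
        self.processed = 0

    def process_item(self, item, spider):
        raw = item.get('price')
        if raw is None:
            raise DropItem("missing price field")
        try:
            # Strip a currency symbol and thousands separators, then convert to float
            item['price'] = float(str(raw).replace('$', '').replace(',', '').strip())
        except ValueError:
            raise DropItem(f"unparseable price: {raw!r}")
        item['normalized_at'] = datetime.now().isoformat()
        self.processed += 1
        return item

    def close_spider(self, spider):
        self.logger.info(f"normalized {self.processed} items for spider '{spider.name}'")

Such a class would be enabled the same way as ExamplePipeline, by listing 'my_project.pipelines.PriceNormalizerPipeline' in PIPELINES in the generated settings.py.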
crawlo/templates/project/settings.py.tmpl

@@ -64,9 +64,9 @@ locals().update(CONFIG.to_dict())
  # ============================== Network request settings ==============================

  # Downloader choice (CurlCffi is recommended; it supports browser-fingerprint emulation)
- DOWNLOADER = "crawlo.downloader.cffi_downloader.CurlCffiDownloader"  # browser fingerprinting
+ DOWNLOADER = "crawlo.downloader.httpx_downloader.HttpXDownloader"  # HTTP/2 support
+ # DOWNLOADER = "crawlo.downloader.cffi_downloader.CurlCffiDownloader"  # browser fingerprinting
  # DOWNLOADER = "crawlo.downloader.aiohttp_downloader.AioHttpDownloader"  # lightweight option
- # DOWNLOADER = "crawlo.downloader.httpx_downloader.HttpXDownloader"  # HTTP/2 support

  # Request timeouts and safety
  DOWNLOAD_TIMEOUT = 30
@@ -89,26 +89,35 @@ ALLOWED_CODES = []
  CONNECTION_POOL_LIMIT = 50
  DOWNLOAD_MAXSIZE = 10 * 1024 * 1024  # 10MB
  DOWNLOAD_WARN_SIZE = 1024 * 1024  # 1MB
+ DOWNLOAD_RETRY_TIMES = MAX_RETRY_TIMES  # downloader-internal retry count (reuses the global value)
+
+ # Download statistics
+ DOWNLOADER_STATS = True  # enable downloader statistics
+ DOWNLOAD_STATS = True  # record download time and size statistics

  # ============================== Concurrency and scheduling ==============================
+
  CONCURRENCY = 8
  INTERVAL = 5
  DEPTH_PRIORITY = 1
  MAX_RUNNING_SPIDERS = 3

+ # Run mode: 'standalone', 'distributed', or 'auto' (auto-detect)
+ RUN_MODE = 'standalone'  # standalone by default, the simplest to use
+
  # ============================== Queue settings (distributed-capable) ==============================

  # Queue type: 'auto' (auto-select), 'memory' (in-memory queue), 'redis' (distributed queue)
  QUEUE_TYPE = 'auto'
  SCHEDULER_MAX_QUEUE_SIZE = 2000
- SCHEDULER_QUEUE_NAME = f'{{project_name}}:requests'
+ SCHEDULER_QUEUE_NAME = f'crawlo:{{project_name}}:queue:requests'  # unified naming convention
  QUEUE_MAX_RETRIES = 3
  QUEUE_TIMEOUT = 300

  # Large-scale crawl tuning
- LARGE_SCALE_BATCH_SIZE = 1000
- LARGE_SCALE_CHECKPOINT_INTERVAL = 5000
- LARGE_SCALE_MAX_MEMORY_USAGE = 500
+ LARGE_SCALE_BATCH_SIZE = 1000  # batch size
+ LARGE_SCALE_CHECKPOINT_INTERVAL = 5000  # checkpoint interval
+ LARGE_SCALE_MAX_MEMORY_USAGE = 500  # maximum memory usage (MB)

  # ============================== Data storage settings ==============================

@@ -120,24 +129,31 @@ MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', '123456')
  MYSQL_DB = os.getenv('MYSQL_DB', '{{project_name}}')
  MYSQL_TABLE = '{{project_name}}_data'
  MYSQL_BATCH_SIZE = 100
-
- # MySQL connection pool
- MYSQL_FLUSH_INTERVAL = 5
- MYSQL_POOL_MIN = 5
- MYSQL_POOL_MAX = 20
- MYSQL_ECHO = False
+ MYSQL_USE_BATCH = False  # enable batched inserts

  # --- MongoDB settings ---
  MONGO_URI = os.getenv('MONGO_URI', 'mongodb://localhost:27017')
- MONGO_DATABASE = f'{{project_name}}_db'
+ MONGO_DATABASE = '{{project_name}}_db'
  MONGO_COLLECTION = '{{project_name}}_items'
  MONGO_MAX_POOL_SIZE = 200
  MONGO_MIN_POOL_SIZE = 20
+ MONGO_BATCH_SIZE = 100  # number of documents per batch insert
+ MONGO_USE_BATCH = False  # enable batched inserts

  # ============================== Deduplication settings ==============================

  REQUEST_DIR = '.'

+ # Select the dedup pipeline automatically from the run mode:
+ # standalone mode defaults to the in-memory dedup pipeline,
+ # distributed mode defaults to the Redis dedup pipeline.
+ if RUN_MODE == 'distributed':
+     # Distributed mode defaults to the Redis dedup pipeline
+     DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.RedisDedupPipeline'
+ else:
+     # Standalone mode defaults to the in-memory dedup pipeline
+     DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.MemoryDedupPipeline'
+
  # Request filter (the Redis filter is recommended for distributed projects)
  FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
  # FILTER_CLASS = 'crawlo.filters.aioredis_filter.AioRedisFilter'  # distributed dedup
@@ -153,7 +169,13 @@ if REDIS_PASSWORD:
  else:
      REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/0'

- REDIS_KEY = f'{{project_name}}:fingerprint'
+ # Redis key settings now live in the individual components and follow a unified naming convention:
+ # crawlo:{project_name}:filter:fingerprint (request dedup)
+ # crawlo:{project_name}:item:fingerprint (item dedup)
+ # crawlo:{project_name}:queue:requests (request queue)
+ # crawlo:{project_name}:queue:processing (in-progress queue)
+ # crawlo:{project_name}:queue:failed (failed queue)
+
  REDIS_TTL = 0
  CLEANUP_FP = 0
  FILTER_DEBUG = True
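The keys listed in the comment above all follow one crawlo:<project>:<component>:<name> pattern. As an illustration only (not framework API), the keys for a hypothetical project named books would come out as follows:

# Hypothetical expansion of the unified Redis key convention for a project named "books"
project = "books"

redis_keys = {
    "request_dedup": f"crawlo:{project}:filter:fingerprint",
    "item_dedup": f"crawlo:{project}:item:fingerprint",
    "requests": f"crawlo:{project}:queue:requests",
    "processing": f"crawlo:{project}:queue:processing",
    "failed": f"crawlo:{project}:queue:failed",
}

for purpose, key in redis_keys.items():
    print(f"{purpose:14s} -> {key}")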
@@ -176,24 +198,56 @@ MIDDLEWARES = [

  # ============================== Data pipeline settings ==============================

+ # Data processing pipelines (the storage options that are enabled)
  PIPELINES = [
-     # The default dedup pipeline is selected automatically from the run mode
-     # standalone mode: crawlo.pipelines.MemoryDedupPipeline
-     # distributed mode: crawlo.pipelines.RedisDedupPipeline
      'crawlo.pipelines.console_pipeline.ConsolePipeline',
      # '{{project_name}}.pipelines.DatabasePipeline',  # custom database pipeline
      # 'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage
      # 'crawlo.pipelines.mongo_pipeline.MongoPipeline',  # MongoDB storage
  ]

+ # Configure the default dedup pipeline automatically from the run mode
+ if RUN_MODE == 'distributed':
+     # Distributed mode adds the Redis dedup pipeline
+     PIPELINES.insert(0, DEFAULT_DEDUP_PIPELINE)
+ else:
+     # Standalone mode adds the in-memory dedup pipeline
+     PIPELINES.insert(0, DEFAULT_DEDUP_PIPELINE)
+
  # ============================== Extensions ==============================

  EXTENSIONS = [
      'crawlo.extension.log_interval.LogIntervalExtension',
      'crawlo.extension.log_stats.LogStats',
      'crawlo.extension.logging_extension.CustomLoggerExtension',
+     # 'crawlo.extension.memory_monitor.MemoryMonitorExtension',  # memory monitoring
+     # 'crawlo.extension.request_recorder.RequestRecorderExtension',  # request recording
+     # 'crawlo.extension.performance_profiler.PerformanceProfilerExtension',  # performance profiling
+     # 'crawlo.extension.health_check.HealthCheckExtension',  # health checks
  ]

+ # ============================== Extension settings ==============================
+
+ # Memory monitor extension
+ # MEMORY_MONITOR_ENABLED = True  # enable memory monitoring
+ # MEMORY_MONITOR_INTERVAL = 60  # memory check interval (seconds)
+ # MEMORY_WARNING_THRESHOLD = 80.0  # memory usage warning threshold (percent)
+ # MEMORY_CRITICAL_THRESHOLD = 90.0  # memory usage critical threshold (percent)
+
+ # Request recorder extension
+ # REQUEST_RECORDER_ENABLED = True  # enable request recording
+ # REQUEST_RECORDER_OUTPUT_DIR = 'requests_log'  # output directory for request logs
+ # REQUEST_RECORDER_MAX_FILE_SIZE = 10 * 1024 * 1024  # maximum size of a single log file (bytes)
+
+ # Performance profiler extension
+ # PERFORMANCE_PROFILER_ENABLED = True  # enable performance profiling
+ # PERFORMANCE_PROFILER_OUTPUT_DIR = 'profiling'  # profiling output directory
+ # PERFORMANCE_PROFILER_INTERVAL = 300  # interval for periodically saving results (seconds)
+
+ # Health check extension
+ # HEALTH_CHECK_ENABLED = True  # enable health checks
+ # HEALTH_CHECK_INTERVAL = 60  # health check interval (seconds)
+
  # ============================== Logging settings ==============================

  LOG_LEVEL = 'INFO'
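Because the template now derives DEFAULT_DEDUP_PIPELINE from RUN_MODE and prepends it to PIPELINES, moving a generated project to distributed mode mostly means editing a handful of the values shown above. A sketch of such an edited settings.py, using only setting names that appear in this diff (the concrete choices are illustrative):

# Illustrative overrides inside a generated settings.py; names come from the template above
RUN_MODE = 'distributed'   # the template then prepends crawlo.pipelines.RedisDedupPipeline
QUEUE_TYPE = 'redis'

PIPELINES = [
    'crawlo.pipelines.console_pipeline.ConsolePipeline',
    'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # enable MySQL storage
]

EXTENSIONS = [
    'crawlo.extension.log_interval.LogIntervalExtension',
    'crawlo.extension.log_stats.LogStats',
    'crawlo.extension.logging_extension.CustomLoggerExtension',
    'crawlo.extension.memory_monitor.MemoryMonitorExtension',  # opt in to memory monitoring
]

# Extension knobs uncommented from the template defaults
MEMORY_MONITOR_INTERVAL = 60
MEMORY_WARNING_THRESHOLD = 80.0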
@@ -232,6 +286,28 @@ DEFAULT_REQUEST_HEADERS = {
      'Upgrade-Insecure-Requests': '1',
  }

+ # ============================== Downloader optimization settings ==============================
+
+ # Downloader health checks
+ DOWNLOADER_HEALTH_CHECK = True  # enable downloader health checks
+ HEALTH_CHECK_INTERVAL = 60  # health check interval (seconds)
+
+ # Request statistics
+ REQUEST_STATS_ENABLED = True  # enable request statistics
+ STATS_RESET_ON_START = False  # reset statistics on startup
+
+ # HttpX downloader-specific settings
+ HTTPX_HTTP2 = True  # enable HTTP/2 support
+ HTTPX_FOLLOW_REDIRECTS = True  # follow redirects automatically
+
+ # AioHttp downloader-specific settings
+ AIOHTTP_AUTO_DECOMPRESS = True  # automatically decompress responses
+ AIOHTTP_FORCE_CLOSE = False  # force-close connections after each request
+
+ # General optimization settings
+ CONNECTION_TTL_DNS_CACHE = 300  # DNS cache TTL (seconds)
+ CONNECTION_KEEPALIVE_TIMEOUT = 15  # Keep-Alive timeout (seconds)
+
  # ============================== Development and debugging ==============================

  # Development mode settings
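Read together with the downloader switch in the first settings hunk (HttpXDownloader becomes the template default, with CurlCffi and AioHttp still offered as commented alternatives), the new per-downloader options group roughly as in this sketch, which uses only setting names that appear in the diff:

# Sketch: picking a downloader and its matching options from the new template settings
DOWNLOADER = "crawlo.downloader.httpx_downloader.HttpXDownloader"
HTTPX_HTTP2 = True               # HTTP/2 for the HttpX downloader
HTTPX_FOLLOW_REDIRECTS = True

# Switching to AioHttp would pair with its own knobs instead:
# DOWNLOADER = "crawlo.downloader.aiohttp_downloader.AioHttpDownloader"
# AIOHTTP_AUTO_DECOMPRESS = True
# AIOHTTP_FORCE_CLOSE = False

# Shared connection tuning
DOWNLOAD_TIMEOUT = 30
CONNECTION_POOL_LIMIT = 50
CONNECTION_TTL_DNS_CACHE = 300     # seconds
CONNECTION_KEEPALIVE_TIMEOUT = 15  # seconds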