crawlo-1.1.1-py3-none-any.whl → crawlo-1.1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.
Files changed (128)
  1. crawlo/__init__.py +34 -33
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +152 -126
  7. crawlo/commands/list.py +156 -147
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -111
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +187 -0
  12. crawlo/config.py +280 -0
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -158
  15. crawlo/core/enhanced_engine.py +190 -0
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +166 -57
  18. crawlo/crawler.py +1028 -495
  19. crawlo/downloader/__init__.py +242 -78
  20. crawlo/downloader/aiohttp_downloader.py +212 -199
  21. crawlo/downloader/cffi_downloader.py +251 -241
  22. crawlo/downloader/httpx_downloader.py +259 -246
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +82 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -37
  30. crawlo/filters/aioredis_filter.py +242 -150
  31. crawlo/filters/memory_filter.py +269 -202
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -245
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -90
  45. crawlo/mode_manager.py +201 -0
  46. crawlo/network/__init__.py +21 -7
  47. crawlo/network/request.py +311 -203
  48. crawlo/network/response.py +271 -166
  49. crawlo/pipelines/__init__.py +22 -13
  50. crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
  51. crawlo/pipelines/console_pipeline.py +39 -39
  52. crawlo/pipelines/csv_pipeline.py +317 -0
  53. crawlo/pipelines/database_dedup_pipeline.py +225 -0
  54. crawlo/pipelines/json_pipeline.py +219 -0
  55. crawlo/pipelines/memory_dedup_pipeline.py +116 -0
  56. crawlo/pipelines/mongo_pipeline.py +116 -116
  57. crawlo/pipelines/mysql_pipeline.py +195 -195
  58. crawlo/pipelines/pipeline_manager.py +56 -56
  59. crawlo/pipelines/redis_dedup_pipeline.py +163 -0
  60. crawlo/project.py +153 -153
  61. crawlo/queue/__init__.py +0 -0
  62. crawlo/queue/pqueue.py +37 -0
  63. crawlo/queue/queue_manager.py +308 -0
  64. crawlo/queue/redis_priority_queue.py +209 -0
  65. crawlo/settings/__init__.py +7 -7
  66. crawlo/settings/default_settings.py +245 -167
  67. crawlo/settings/setting_manager.py +99 -99
  68. crawlo/spider/__init__.py +639 -129
  69. crawlo/stats_collector.py +59 -59
  70. crawlo/subscriber.py +106 -106
  71. crawlo/task_manager.py +30 -27
  72. crawlo/templates/crawlo.cfg.tmpl +10 -10
  73. crawlo/templates/project/__init__.py.tmpl +3 -3
  74. crawlo/templates/project/items.py.tmpl +17 -17
  75. crawlo/templates/project/middlewares.py.tmpl +87 -76
  76. crawlo/templates/project/pipelines.py.tmpl +342 -64
  77. crawlo/templates/project/run.py.tmpl +252 -0
  78. crawlo/templates/project/settings.py.tmpl +251 -54
  79. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  80. crawlo/templates/spider/spider.py.tmpl +178 -32
  81. crawlo/utils/__init__.py +7 -7
  82. crawlo/utils/controlled_spider_mixin.py +440 -0
  83. crawlo/utils/date_tools.py +233 -233
  84. crawlo/utils/db_helper.py +343 -343
  85. crawlo/utils/func_tools.py +82 -82
  86. crawlo/utils/large_scale_config.py +287 -0
  87. crawlo/utils/large_scale_helper.py +344 -0
  88. crawlo/utils/log.py +128 -128
  89. crawlo/utils/queue_helper.py +176 -0
  90. crawlo/utils/request.py +267 -267
  91. crawlo/utils/request_serializer.py +220 -0
  92. crawlo/utils/spider_loader.py +62 -62
  93. crawlo/utils/system.py +11 -11
  94. crawlo/utils/tools.py +4 -4
  95. crawlo/utils/url.py +39 -39
  96. crawlo-1.1.3.dist-info/METADATA +635 -0
  97. crawlo-1.1.3.dist-info/RECORD +113 -0
  98. examples/__init__.py +7 -7
  99. examples/controlled_spider_example.py +205 -0
  100. tests/__init__.py +7 -7
  101. tests/test_final_validation.py +154 -0
  102. tests/test_proxy_health_check.py +32 -32
  103. tests/test_proxy_middleware_integration.py +136 -136
  104. tests/test_proxy_providers.py +56 -56
  105. tests/test_proxy_stats.py +19 -19
  106. tests/test_proxy_strategies.py +59 -59
  107. tests/test_redis_config.py +29 -0
  108. tests/test_redis_queue.py +225 -0
  109. tests/test_request_serialization.py +71 -0
  110. tests/test_scheduler.py +242 -0
  111. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  112. crawlo/utils/pqueue.py +0 -174
  113. crawlo-1.1.1.dist-info/METADATA +0 -220
  114. crawlo-1.1.1.dist-info/RECORD +0 -100
  115. examples/baidu_spider/__init__.py +0 -7
  116. examples/baidu_spider/demo.py +0 -94
  117. examples/baidu_spider/items.py +0 -46
  118. examples/baidu_spider/middleware.py +0 -49
  119. examples/baidu_spider/pipeline.py +0 -55
  120. examples/baidu_spider/run.py +0 -27
  121. examples/baidu_spider/settings.py +0 -121
  122. examples/baidu_spider/spiders/__init__.py +0 -7
  123. examples/baidu_spider/spiders/bai_du.py +0 -61
  124. examples/baidu_spider/spiders/miit.py +0 -159
  125. examples/baidu_spider/spiders/sina.py +0 -79
  126. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
  127. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
  128. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
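Among the additions are new configuration and runtime modules (crawlo/config.py, crawlo/mode_manager.py, the crawlo/queue/ package) that the regenerated project templates below build on. As a rough orientation only, here is a minimal sketch of how the new run.py.tmpl (shown at the end of this diff) drives a crawl; it reuses only the calls that appear in that template (CrawloConfig.standalone(), config.set(...), CrawlerProcess(settings=...), process.crawl(...)) and has not been verified against the released 1.1.3 API.

    # Minimal sketch, assuming the API exercised by the new run.py.tmpl below;
    # names and call signatures are taken from that template, not from crawlo docs.
    import asyncio

    from crawlo.crawler import CrawlerProcess
    from crawlo.config import CrawloConfig


    async def main():
        config = CrawloConfig.standalone()   # in-memory queue, no Redis needed
        config.set('CONCURRENCY', 8)         # override a single setting
        config.set('LOG_LEVEL', 'DEBUG')

        process = CrawlerProcess(settings=config.to_dict())
        await process.crawl(['my_spider'])   # hypothetical spider name


    if __name__ == '__main__':
        asyncio.run(main())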
crawlo/templates/project/pipelines.py.tmpl
@@ -1,64 +1,342 @@
- # -*- coding: UTF-8 -*-
- """
- {{project_name}}.pipelines
- ==========================
- Item pipelines for processing the Items returned by spiders,
- e.g. cleaning, validation, deduplication, saving to a database.
- """
-
- class PrintItemPipeline:
-     """
-     A simple pipeline that prints each Item.
-     """
-
-     def process_item(self, item, spider):
-         print(f"Pipeline received item: {dict(item)}")
-         return item
-
-
- class DuplicatesPipeline:
-     """
-     An example deduplication pipeline.
-     """
-     def __init__(self):
-         self.seen = set()
-
-     def process_item(self, item, spider):
-         identifier = item.get('id') or item.get('url')
-         if identifier in self.seen:
-             spider.logger.debug(f"Duplicate item found: {identifier}")
-             raise DropItem(f"Duplicate item: {identifier}")
-         self.seen.add(identifier)
-         return item
-
-
- # class MySQLPipeline:
- #     """
- #     Example pipeline that saves Items to MySQL.
- #     """
- #     def __init__(self, mysql_uri, mysql_user, mysql_password, mysql_db):
- #         self.mysql_uri = mysql_uri
- #         self.mysql_user = mysql_user
- #         self.mysql_password = mysql_password
- #         self.mysql_db = mysql_db
- #         self.connection = None
- #
- #     @classmethod
- #     def from_settings(cls, settings):
- #         return cls(
- #             mysql_uri=settings.get('MYSQL_HOST'),
- #             mysql_user=settings.get('MYSQL_USER'),
- #             mysql_password=settings.get('MYSQL_PASSWORD'),
- #             mysql_db=settings.get('MYSQL_DB')
- #         )
- #
- #     def open_spider(self, spider):
- #         self.connection = pymysql.connect(...)
- #
- #     def close_spider(self, spider):
- #         if self.connection:
- #             self.connection.close()
- #
- #     def process_item(self, item, spider):
- #         # Execute the SQL insert
- #         return item
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}}.pipelines
+ ==========================
+ Item pipelines for processing the Items returned by spiders,
+ e.g. cleaning, validation, deduplication, saving to a database.
+
+ Supports asynchronous, concurrent processing and multiple storage backends.
+ """
+
+ import json
+ import asyncio
+ from typing import Dict, Any
+ from datetime import datetime
+ from crawlo.exceptions import DropItem
+ from crawlo.utils.log import get_logger
+
+
+ class ValidationPipeline:
+     """
+     Data validation pipeline.
+
+     Checks that required fields are present and filters out invalid items.
+     """
+
+     def __init__(self):
+         self.logger = get_logger(self.__class__.__name__)
+         # Required fields (adjust to your needs)
+         self.required_fields = ['title', 'url']
+
+     def process_item(self, item, spider):
+         """Validate an item."""
+         # Check the required fields
+         for field in self.required_fields:
+             if not item.get(field):
+                 raise DropItem(f"Missing required field: {field}")
+
+         # Clean the data
+         if 'title' in item:
+             item['title'] = str(item['title']).strip()
+
+         # Add a timestamp
+         item['crawled_at'] = datetime.now().isoformat()
+
+         self.logger.debug(f"Validation passed: {item.get('url', 'Unknown URL')}")
+         return item
+
+
+ class PrintItemPipeline:
+     """
+     Simple printing pipeline for debugging.
+     """
+
+     def __init__(self):
+         self.logger = get_logger(self.__class__.__name__)
+         self.item_count = 0
+
+     def process_item(self, item, spider):
+         """Print an item."""
+         self.item_count += 1
+         self.logger.info(f"[Item #{self.item_count}] {json.dumps(dict(item), ensure_ascii=False, indent=2)}")
+         return item
+
+
+ class DuplicatesPipeline:
+     """
+     Deduplication pipeline.
+
+     Deduplicates on a configurable field to avoid storing duplicate items.
+     """
+
+     def __init__(self):
+         self.logger = get_logger(self.__class__.__name__)
+         self.seen = set()
+         # Dedup field ('url', 'id', or any other unique identifier)
+         self.duplicate_field = 'url'
+         self.drop_count = 0
+
+     def process_item(self, item, spider):
+         """Check for and drop duplicate items."""
+         identifier = item.get(self.duplicate_field)
+
+         if not identifier:
+             self.logger.warning(f"Item is missing dedup field '{self.duplicate_field}', skipping dedup check")
+             return item
+
+         if identifier in self.seen:
+             self.drop_count += 1
+             self.logger.debug(f"Duplicate item found: {identifier} ({self.drop_count} dropped so far)")
+             raise DropItem(f"Duplicate item: {identifier}")
+
+         self.seen.add(identifier)
+         return item
+
+     def close_spider(self, spider):
+         """Log statistics when the spider closes."""
+         self.logger.info(f"DuplicatesPipeline stats: {self.drop_count} duplicates dropped, {len(self.seen)} unique items")
+
+
+ class JsonFilesPipeline:
+     """
+     JSON file storage pipeline.
+
+     Saves each item as a separate JSON file.
+     """
+
+     def __init__(self, output_dir='output'):
+         self.logger = get_logger(self.__class__.__name__)
+         self.output_dir = output_dir
+         self.file_count = 0
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         """Create a pipeline instance from the crawler settings."""
+         output_dir = crawler.settings.get('JSON_OUTPUT_DIR', 'output')
+         return cls(output_dir=output_dir)
+
+     def open_spider(self, spider):
+         """Create the output directory when the spider starts."""
+         import os
+         os.makedirs(self.output_dir, exist_ok=True)
+         self.logger.info(f"JSON files will be saved to: {self.output_dir}")
+
+     def process_item(self, item, spider):
+         """Save an item as a JSON file."""
+         self.file_count += 1
+         filename = f"{spider.name}_{self.file_count:06d}.json"
+         filepath = f"{self.output_dir}/{filename}"
+
+         with open(filepath, 'w', encoding='utf-8') as f:
+             json.dump(dict(item), f, ensure_ascii=False, indent=2)
+
+         self.logger.debug(f"Saved: {filepath}")
+         return item
+
+
+ class DatabasePipeline:
+     """
+     Example database storage pipeline.
+
+     Supports MySQL and MongoDB; pick whichever you need.
+     """
+
+     def __init__(self, database_type='mysql'):
+         self.logger = get_logger(self.__class__.__name__)
+         self.database_type = database_type
+         self.connection = None
+         self.batch_items = []
+         self.batch_size = 100  # batch write size
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         """Create a pipeline instance from the crawler settings."""
+         db_type = crawler.settings.get('DATABASE_TYPE', 'mysql')
+         return cls(database_type=db_type)
+
+     async def open_spider(self, spider):
+         """Connect to the database when the spider starts."""
+         if self.database_type == 'mysql':
+             await self._connect_mysql(spider)
+         elif self.database_type == 'mongodb':
+             await self._connect_mongodb(spider)
+         else:
+             raise ValueError(f"Unsupported database type: {self.database_type}")
+
+     async def _connect_mysql(self, spider):
+         """Connect to MySQL."""
+         try:
+             import aiomysql
+
+             settings = spider.crawler.settings
+             self.connection = await aiomysql.connect(
+                 host=settings.get('MYSQL_HOST', '127.0.0.1'),
+                 port=settings.get('MYSQL_PORT', 3306),
+                 user=settings.get('MYSQL_USER', 'root'),
+                 password=settings.get('MYSQL_PASSWORD', ''),
+                 db=settings.get('MYSQL_DB', '{{project_name}}'),
+                 charset='utf8mb4',
+                 autocommit=True
+             )
+
+             # Create the table if it does not exist
+             await self._create_mysql_table(spider)
+             self.logger.info("Connected to MySQL")
+
+         except ImportError:
+             self.logger.error("Missing aiomysql dependency, install it with: pip install aiomysql")
+             raise
+         except Exception as e:
+             self.logger.error(f"MySQL connection failed: {e}")
+             raise
+
+     async def _connect_mongodb(self, spider):
+         """Connect to MongoDB."""
+         try:
+             from motor.motor_asyncio import AsyncIOMotorClient
+
+             settings = spider.crawler.settings
+             mongo_uri = settings.get('MONGO_URI', 'mongodb://localhost:27017')
+
+             self.connection = AsyncIOMotorClient(mongo_uri)
+             self.database = self.connection[settings.get('MONGO_DATABASE', '{{project_name}}_db')]
+             self.collection = self.database[settings.get('MONGO_COLLECTION', '{{project_name}}_items')]
+
+             self.logger.info("Connected to MongoDB")
+
+         except ImportError:
+             self.logger.error("Missing motor dependency, install it with: pip install motor")
+             raise
+         except Exception as e:
+             self.logger.error(f"MongoDB connection failed: {e}")
+             raise
+
+     async def _create_mysql_table(self, spider):
+         """Create the MySQL table schema."""
+         table_name = spider.crawler.settings.get('MYSQL_TABLE', '{{project_name}}_data')
+
+         create_sql = f"""
+         CREATE TABLE IF NOT EXISTS `{table_name}` (
+             `id` bigint AUTO_INCREMENT PRIMARY KEY,
+             `title` varchar(500) DEFAULT NULL,
+             `url` varchar(1000) NOT NULL,
+             `content` text DEFAULT NULL,
+             `crawled_at` datetime DEFAULT CURRENT_TIMESTAMP,
+             `spider_name` varchar(100) DEFAULT NULL,
+             `extra_data` json DEFAULT NULL,
+             UNIQUE KEY `unique_url` (`url`(255)),
+             INDEX `idx_spider_name` (`spider_name`),
+             INDEX `idx_crawled_at` (`crawled_at`)
+         ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+         """
+
+         async with self.connection.cursor() as cursor:
+             await cursor.execute(create_sql)
+             self.logger.info(f"Table '{table_name}' is ready")
+
+     async def process_item(self, item, spider):
+         """Process an item (batched storage)."""
+         self.batch_items.append(dict(item))
+
+         # Flush the batch
+         if len(self.batch_items) >= self.batch_size:
+             await self._save_batch(spider)
+
+         return item
+
+     async def _save_batch(self, spider):
+         """Save a batch of items."""
+         if not self.batch_items:
+             return
+
+         try:
+             if self.database_type == 'mysql':
+                 await self._save_to_mysql(spider)
+             elif self.database_type == 'mongodb':
+                 await self._save_to_mongodb(spider)
+
+             self.logger.info(f"Saved a batch of {len(self.batch_items)} items")
+             self.batch_items.clear()
+
+         except Exception as e:
+             self.logger.error(f"Batch save failed: {e}")
+             # Optionally retry or record the failed items
+
+     async def _save_to_mysql(self, spider):
+         """Save to MySQL."""
+         table_name = spider.crawler.settings.get('MYSQL_TABLE', '{{project_name}}_data')
+
+         insert_sql = f"""
+         INSERT INTO `{table_name}`
+         (`title`, `url`, `content`, `spider_name`, `extra_data`)
+         VALUES (%s, %s, %s, %s, %s)
+         ON DUPLICATE KEY UPDATE
+         `title` = VALUES(`title`),
+         `content` = VALUES(`content`),
+         `crawled_at` = CURRENT_TIMESTAMP
+         """
+
+         async with self.connection.cursor() as cursor:
+             batch_data = []
+             for item in self.batch_items:
+                 # Extract the standard fields
+                 title = item.get('title', '')[:500]  # limit length
+                 url = item.get('url', '')
+                 content = item.get('content', '')
+
+                 # Store any remaining fields as JSON
+                 extra_fields = {k: v for k, v in item.items()
+                                 if k not in ['title', 'url', 'content']}
+                 extra_data = json.dumps(extra_fields, ensure_ascii=False) if extra_fields else None
+
+                 batch_data.append((title, url, content, spider.name, extra_data))
+
+             await cursor.executemany(insert_sql, batch_data)
+
+     async def _save_to_mongodb(self, spider):
+         """Save to MongoDB."""
+         # Add spider_name to each item
+         for item in self.batch_items:
+             item['spider_name'] = spider.name
+
+         # Bulk insert
+         await self.collection.insert_many(self.batch_items)
+
+     async def close_spider(self, spider):
+         """Save remaining items and close the connection when the spider finishes."""
+         # Save any remaining items
+         if self.batch_items:
+             await self._save_batch(spider)
+
+         # Close the connection
+         if self.connection:
+             if self.database_type == 'mysql':
+                 self.connection.close()
+             elif self.database_type == 'mongodb':
+                 self.connection.close()
+
+             self.logger.info("Database connection closed")
+
+
+ # ======================== Usage notes ========================
+ #
+ # Enable pipelines in settings.py:
+ # PIPELINES = [
+ #     '{{project_name}}.pipelines.ValidationPipeline',   # data validation
+ #     '{{project_name}}.pipelines.DuplicatesPipeline',   # deduplication
+ #     '{{project_name}}.pipelines.PrintItemPipeline',    # print output (debugging)
+ #     '{{project_name}}.pipelines.JsonFilesPipeline',    # JSON file storage
+ #     '{{project_name}}.pipelines.DatabasePipeline',     # database storage
+ # ]
+ #
+ # The Crawlo framework also ships several built-in deduplication pipelines:
+ # 1. crawlo.pipelines.MemoryDedupPipeline   - in-memory dedup (default in standalone mode)
+ # 2. crawlo.pipelines.RedisDedupPipeline    - Redis dedup (default in distributed mode)
+ # 3. crawlo.pipelines.BloomDedupPipeline    - Bloom filter dedup (large-scale data)
+ # 4. crawlo.pipelines.DatabaseDedupPipeline - database dedup (persistent)
+ #
+ # Related settings:
+ # JSON_OUTPUT_DIR = 'output'                 # JSON output directory
+ # DATABASE_TYPE = 'mysql'                    # database type: mysql/mongodb
+ # MYSQL_TABLE = '{{project_name}}_data'      # MySQL table name
+ # ======================== Usage notes ========================
crawlo/templates/project/run.py.tmpl
@@ -0,0 +1,252 @@
+ #!/usr/bin/env python
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}} run script
+ ============================
+ A smart spider launcher built on the Crawlo framework.
+ Supports standalone and distributed modes, flexible configuration, works out of the box.
+
+ 🎯 Quick start:
+     python run.py spider_name                    # run in standalone mode
+     python run.py spider_name --distributed      # run in distributed mode
+     python run.py spider_name --env production   # use a preset configuration
+     python run.py all                            # run all spiders
+
+ 🔧 Advanced options:
+     python run.py spider_name --dry-run          # dry run (no actual crawling)
+     python run.py spider_name --concurrency 16   # custom concurrency
+     python run.py spider_name --mode gentle      # gentle mode (low load)
+     python run.py spider1 spider2 --distributed  # multiple spiders, distributed
+
+ 📦 Run modes:
+     --standalone   standalone mode (default) - in-memory queue, no external dependencies
+     --distributed  distributed mode - Redis queue, multi-node support
+     --auto         auto mode - detects Redis availability automatically
+
+ 🎛️ Presets:
+     --env development  development environment (debug friendly)
+     --env production   production environment (high performance)
+     --env large-scale  large-scale crawling (memory optimized)
+     --env gentle       gentle mode (low load)
+ """
+
+ import os
+ import sys
+ import asyncio
+ import argparse
+ from pathlib import Path
+ from crawlo.crawler import CrawlerProcess
+ from crawlo.config import CrawloConfig
+ from crawlo.mode_manager import standalone_mode, distributed_mode, auto_mode
+
+
+ def create_parser():
+     """Create the command-line argument parser."""
+     parser = argparse.ArgumentParser(
+         description='{{project_name}} spider launcher - built on the Crawlo framework',
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ Examples:
+   python run.py my_spider                  # standalone mode (default)
+   python run.py my_spider --distributed    # distributed mode
+   python run.py my_spider --env production # production preset
+   python run.py spider1 spider2            # run multiple spiders
+   python run.py all                        # run all spiders
+   python run.py my_spider --dry-run        # test mode
+         """
+     )
+
+     # Spider names (positional argument)
+     parser.add_argument(
+         'spiders',
+         nargs='*',
+         help='Names of the spiders to run (several may be given; "all" runs every spider)'
+     )
+
+     # Run mode selection
+     mode_group = parser.add_mutually_exclusive_group()
+     mode_group.add_argument(
+         '--standalone',
+         action='store_true',
+         help='Standalone mode (default) - in-memory queue, no external dependencies'
+     )
+     mode_group.add_argument(
+         '--distributed',
+         action='store_true',
+         help='Distributed mode - Redis queue, multi-node crawling'
+     )
+     mode_group.add_argument(
+         '--auto',
+         action='store_true',
+         help='Auto mode - detect Redis availability and pick the queue type'
+     )
+
+     # Preset environment configurations
+     parser.add_argument(
+         '--env',
+         choices=['development', 'production', 'large-scale', 'gentle'],
+         help='Preset environment configuration (takes precedence over the mode flags)'
+     )
+
+     # Performance tuning options
+     parser.add_argument(
+         '--concurrency',
+         type=int,
+         help='Number of concurrent requests (overrides the default)'
+     )
+
+     parser.add_argument(
+         '--delay',
+         type=float,
+         help='Delay between requests in seconds'
+     )
+
+     # Feature options
+     parser.add_argument(
+         '--dry-run',
+         action='store_true',
+         help='Dry-run mode - parse pages without performing the actual crawl'
+     )
+
+     parser.add_argument(
+         '--debug',
+         action='store_true',
+         help='Enable debug mode - verbose logging'
+     )
+
+     parser.add_argument(
+         '--config-file',
+         type=str,
+         help='Path to a custom configuration file'
+     )
+
+     # Environment variable support
+     parser.add_argument(
+         '--from-env',
+         action='store_true',
+         help='Load configuration from environment variables (CRAWLO_*)'
+     )
+
+     return parser
+
+
+ def build_config(args):
+     """Build the configuration from the command-line arguments."""
+     config = None
+
+     # 1. Environment-variable configuration takes priority
+     if args.from_env:
+         config = CrawloConfig.from_env()
+         print("📋 Using configuration from environment variables")
+
+     # 2. Preset environment configuration
+     elif args.env:
+         presets = {
+             'development': CrawloConfig.presets().development(),
+             'production': CrawloConfig.presets().production(),
+             'large-scale': CrawloConfig.presets().large_scale(),
+             'gentle': CrawloConfig.presets().gentle()
+         }
+         config = presets[args.env]
+         print(f"🎛️ Using preset configuration: {args.env}")
+
+     # 3. Mode-based configuration
+     elif args.distributed:
+         config = CrawloConfig.distributed()
+         print("🌐 Distributed mode enabled")
+     elif args.auto:
+         config = CrawloConfig.auto()
+         print("🤖 Auto-detection mode enabled")
+     else:
+         # Standalone mode by default
+         config = CrawloConfig.standalone()
+         print("💻 Using standalone mode (default)")
+
+     # 4. Apply command-line overrides
+     if args.concurrency:
+         config.set('CONCURRENCY', args.concurrency)
+         print(f"⚡ Concurrency set to {args.concurrency}")
+
+     if args.delay:
+         config.set('DOWNLOAD_DELAY', args.delay)
+         print(f"⏱️ Request delay set to {args.delay}s")
+
+     if args.debug:
+         config.set('LOG_LEVEL', 'DEBUG')
+         print("🐛 Debug mode enabled")
+
+     if args.dry_run:
+         # Dry-run settings (adjust as needed)
+         config.set('DOWNLOAD_DELAY', 0.1)  # speed things up
+         config.set('CONCURRENCY', 1)       # lower the concurrency
+         print("🧪 Dry-run mode enabled")
+
+     return config
+
+
+ async def main():
+     """Main entry point: parse arguments, build the configuration, start the spiders."""
+
+     # Parse command-line arguments
+     parser = create_parser()
+     args = parser.parse_args()
+
+     # Make sure at least one spider was specified
+     if not args.spiders:
+         print("❌ Please specify the name of the spider to run")
+         print("\nAvailable spiders:")
+         print("  # TODO: list your spiders here")
+         print("  # from {{project_name}}.spiders import MySpider")
+         print("\nUsage: python run.py <spider_name>")
+         parser.print_help()
+         return
+
+     # Build the configuration
+     config = build_config(args)
+
+     # Create the crawler process
+     print(f"\n🚀 Starting spiders: {', '.join(args.spiders)}")
+
+     if args.dry_run:
+         print("  🧪 [dry run] Pages will be parsed but no actual crawling will be performed")
+
+     try:
+         # Apply the configuration and start
+         process = CrawlerProcess(settings=config.to_dict())
+
+         # Check whether all spiders should be run
+         if 'all' in [s.lower() for s in args.spiders]:
+             # Collect the names of all registered spiders
+             spider_names = process.get_spider_names()
+             if not spider_names:
+                 print("❌ No spiders found")
+                 print("💡 Please make sure that:")
+                 print("   • your spiders are defined in the 'spiders/' directory")
+                 print("   • each spider class has a 'name' attribute")
+                 return 1
+
+             print(f"📋 Found {len(spider_names)} spiders: {', '.join(spider_names)}")
+             # Run all spiders
+             await process.crawl(spider_names)
+         else:
+             # Run the specified spiders
+             await process.crawl(args.spiders)
+
+         print("\n✅ All spiders finished")
+
+     except ImportError as e:
+         print(f"❌ Failed to import spider: {e}")
+         print("   Check that the spider file exists and update the imports in run.py")
+     except Exception as e:
+         print(f"❌ Runtime error: {e}")
+         raise
+
+
+ if __name__ == '__main__':
+     try:
+         asyncio.run(main())
+     except KeyboardInterrupt:
+         print("\n⏹️ Spider execution interrupted by user")
+     except Exception as e:
+         print(f"❌ Runtime error: {e}")
+         sys.exit(1)