crawlo 1.1.3-py3-none-any.whl → 1.1.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo has been flagged as possibly problematic.

Files changed (118)
  1. crawlo/__init__.py +34 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/list.py +155 -155
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -196
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +186 -186
  12. crawlo/config.py +279 -279
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -171
  15. crawlo/core/enhanced_engine.py +189 -189
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +165 -165
  18. crawlo/crawler.py +1027 -1027
  19. crawlo/downloader/__init__.py +242 -242
  20. crawlo/downloader/aiohttp_downloader.py +212 -212
  21. crawlo/downloader/cffi_downloader.py +251 -251
  22. crawlo/downloader/httpx_downloader.py +259 -259
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +81 -81
  25. crawlo/extension/__init__.py +38 -31
  26. crawlo/extension/health_check.py +142 -0
  27. crawlo/extension/log_interval.py +58 -49
  28. crawlo/extension/log_stats.py +82 -44
  29. crawlo/extension/logging_extension.py +44 -35
  30. crawlo/extension/memory_monitor.py +89 -0
  31. crawlo/extension/performance_profiler.py +118 -0
  32. crawlo/extension/request_recorder.py +108 -0
  33. crawlo/filters/__init__.py +154 -154
  34. crawlo/filters/aioredis_filter.py +241 -241
  35. crawlo/filters/memory_filter.py +269 -269
  36. crawlo/items/__init__.py +23 -23
  37. crawlo/items/base.py +21 -21
  38. crawlo/items/fields.py +53 -53
  39. crawlo/items/items.py +104 -104
  40. crawlo/middleware/__init__.py +21 -21
  41. crawlo/middleware/default_header.py +32 -32
  42. crawlo/middleware/download_delay.py +28 -28
  43. crawlo/middleware/middleware_manager.py +135 -135
  44. crawlo/middleware/proxy.py +248 -248
  45. crawlo/middleware/request_ignore.py +30 -30
  46. crawlo/middleware/response_code.py +18 -18
  47. crawlo/middleware/response_filter.py +26 -26
  48. crawlo/middleware/retry.py +124 -124
  49. crawlo/mode_manager.py +200 -200
  50. crawlo/network/__init__.py +21 -21
  51. crawlo/network/request.py +311 -311
  52. crawlo/network/response.py +271 -271
  53. crawlo/pipelines/__init__.py +21 -21
  54. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  55. crawlo/pipelines/console_pipeline.py +39 -39
  56. crawlo/pipelines/csv_pipeline.py +316 -316
  57. crawlo/pipelines/database_dedup_pipeline.py +224 -224
  58. crawlo/pipelines/json_pipeline.py +218 -218
  59. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  60. crawlo/pipelines/mongo_pipeline.py +132 -117
  61. crawlo/pipelines/mysql_pipeline.py +317 -195
  62. crawlo/pipelines/pipeline_manager.py +56 -56
  63. crawlo/pipelines/redis_dedup_pipeline.py +162 -162
  64. crawlo/project.py +153 -153
  65. crawlo/queue/pqueue.py +37 -37
  66. crawlo/queue/queue_manager.py +307 -307
  67. crawlo/queue/redis_priority_queue.py +208 -208
  68. crawlo/settings/__init__.py +7 -7
  69. crawlo/settings/default_settings.py +278 -244
  70. crawlo/settings/setting_manager.py +99 -99
  71. crawlo/spider/__init__.py +639 -639
  72. crawlo/stats_collector.py +59 -59
  73. crawlo/subscriber.py +131 -106
  74. crawlo/task_manager.py +30 -30
  75. crawlo/templates/crawlo.cfg.tmpl +10 -10
  76. crawlo/templates/project/__init__.py.tmpl +3 -3
  77. crawlo/templates/project/items.py.tmpl +17 -17
  78. crawlo/templates/project/middlewares.py.tmpl +111 -87
  79. crawlo/templates/project/pipelines.py.tmpl +97 -341
  80. crawlo/templates/project/run.py.tmpl +251 -251
  81. crawlo/templates/project/settings.py.tmpl +279 -250
  82. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  83. crawlo/templates/spider/spider.py.tmpl +142 -178
  84. crawlo/utils/__init__.py +7 -7
  85. crawlo/utils/controlled_spider_mixin.py +439 -439
  86. crawlo/utils/date_tools.py +233 -233
  87. crawlo/utils/db_helper.py +343 -343
  88. crawlo/utils/func_tools.py +82 -82
  89. crawlo/utils/large_scale_config.py +286 -286
  90. crawlo/utils/large_scale_helper.py +343 -343
  91. crawlo/utils/log.py +128 -128
  92. crawlo/utils/queue_helper.py +175 -175
  93. crawlo/utils/request.py +267 -267
  94. crawlo/utils/request_serializer.py +219 -219
  95. crawlo/utils/spider_loader.py +62 -62
  96. crawlo/utils/system.py +11 -11
  97. crawlo/utils/tools.py +4 -4
  98. crawlo/utils/url.py +39 -39
  99. crawlo-1.1.4.dist-info/METADATA +403 -0
  100. crawlo-1.1.4.dist-info/RECORD +117 -0
  101. examples/__init__.py +7 -7
  102. examples/controlled_spider_example.py +205 -205
  103. tests/__init__.py +7 -7
  104. tests/test_final_validation.py +153 -153
  105. tests/test_proxy_health_check.py +32 -32
  106. tests/test_proxy_middleware_integration.py +136 -136
  107. tests/test_proxy_providers.py +56 -56
  108. tests/test_proxy_stats.py +19 -19
  109. tests/test_proxy_strategies.py +59 -59
  110. tests/test_redis_config.py +28 -28
  111. tests/test_redis_queue.py +224 -224
  112. tests/test_request_serialization.py +70 -70
  113. tests/test_scheduler.py +241 -241
  114. crawlo-1.1.3.dist-info/METADATA +0 -635
  115. crawlo-1.1.3.dist-info/RECORD +0 -113
  116. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/WHEEL +0 -0
  117. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/entry_points.txt +0 -0
  118. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/top_level.txt +0 -0
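The file-level summary above can be reproduced locally by comparing the two wheels directly. The following is a minimal sketch, assuming both wheels have already been downloaded (for example with `pip download crawlo==1.1.3` and `pip download crawlo==1.1.4`); only the Python standard library is used, and the wheel filenames are the conventional ones rather than anything crawlo-specific.

# Minimal sketch: rebuild a per-file "+added -removed" summary from two wheels.
# Assumes both wheel files are present in the working directory.
import difflib
import zipfile

OLD_WHEEL = "crawlo-1.1.3-py3-none-any.whl"
NEW_WHEEL = "crawlo-1.1.4-py3-none-any.whl"

def read_members(path):
    """Return {archive member name: list of text lines} for a wheel (a zip archive)."""
    with zipfile.ZipFile(path) as zf:
        return {
            name: zf.read(name).decode("utf-8", errors="replace").splitlines()
            for name in zf.namelist()
            if not name.endswith("/")
        }

old, new = read_members(OLD_WHEEL), read_members(NEW_WHEEL)

for name in sorted(set(old) | set(new)):
    before, after = old.get(name, []), new.get(name, [])
    if before == after:
        continue
    diff = list(difflib.unified_diff(before, after, lineterm=""))
    added = sum(1 for line in diff if line.startswith("+") and not line.startswith("+++"))
    removed = sum(1 for line in diff if line.startswith("-") and not line.startswith("---"))
    print(f"{name} +{added} -{removed}")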
crawlo/templates/project/middlewares.py.tmpl
@@ -1,87 +1,111 @@
- # -*- coding: UTF-8 -*-
- """
- {{project_name}}.middlewares
- ============================
- 自定义中间件,用于在请求/响应/异常处理过程中插入自定义逻辑。
- """
-
- import random
- from crawlo import Request, Response
- from crawlo.utils.log import get_logger
- from crawlo.exceptions import IgnoreRequest
-
-
- class RandomUserAgentMiddleware:
-     """随机 User-Agent 中间件。"""
-
-     def __init__(self):
-         self.logger = get_logger(self.__class__.__name__)
-         self.user_agents = [
-             'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
-             'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
-             'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:135.0) Gecko/20100101 Firefox/135.0',
-             'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:135.0) Gecko/20100101 Firefox/135.0',
-         ]
-
-     def process_request(self, request, spider):
-         if 'User-Agent' not in request.headers:
-             ua = random.choice(self.user_agents)
-             request.headers['User-Agent'] = ua
-         return None
-
-
- class CustomDownloaderMiddleware:
-     """自定义下载器中间件示例。"""
-
-     def __init__(self):
-         self.logger = get_logger(self.__class__.__name__)
-
-     def process_request(self, request, spider):
-         """在请求被下载器执行前调用。"""
-         # 示例:添加自定义请求头
-         # request.headers['Custom-Header'] = 'Custom-Value'
-         # 示例:设置代理
-         # request.meta['proxy'] = 'http://proxy.example.com:8080'
-         return None
-
-     def process_response(self, request, response, spider):
-         """在响应被 Spider 处理前调用。"""
-         # 示例:检查响应状态
-         if response.status_code == 403:
-             self.logger.warning(f"访问被拒绝: {request.url}")
-         return response
-
-     def process_exception(self, request, exception, spider):
-         """在下载或处理过程中发生异常时调用。"""
-         self.logger.error(f"请求异常: {request.url} - {exception}")
-         return None
-
-
- class CustomSpiderMiddleware:
-     """
-     Spider 中间件示例。
-     """
-
-     def __init__(self):
-         self.logger = get_logger(self.__class__.__name__)
-
-     def process_spider_input(self, response, spider):
-         """在 Spider parse 方法被调用前调用。"""
-         # 可以用来验证响应
-         pass
-
-     def process_spider_output(self, response, result, spider):
-         """在 Spider 的 parse 方法返回结果后调用。"""
-         # 可以用来过滤或修改结果
-         for item in result:
-             yield item
-
-     def process_spider_exception(self, response, exception, spider):
-         """在 Spider 的 parse 方法抛出异常时调用。"""
-         self.logger.error(f"Spider 异常: {response.url} - {exception}")
-         pass
-
-     def process_start_requests(self, start_requests, spider):
-         """在 Spider 的 start_requests 生成器被消费时调用。"""
-         for request in start_requests:
-             yield request
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}}.middlewares
+ ============================
+ 自定义中间件,用于在请求/响应/异常处理过程中插入自定义逻辑。
+
+ 这是一个简单的示例中间件,您可以根据需要添加更多中间件。
+ """
+
+ import random
+ from crawlo import Request, Response
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import IgnoreRequest
+
+
+ class ExampleMiddleware:
+     """
+     示例中间件,演示如何处理请求、响应和异常。
+
+     此中间件会:
+     1. 为请求添加随机 User-Agent
+     2. 记录请求和响应信息
+     3. 处理异常情况
+     """
+
+     def __init__(self):
+         self.logger = get_logger(self.__class__.__name__)
+         self.user_agents = [
+             'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
+             'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
+             'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:135.0) Gecko/20100101 Firefox/135.0',
+             'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:135.0) Gecko/20100101 Firefox/135.0',
+         ]
+
+     def process_request(self, request, spider):
+         """
+         在请求被下载器执行前调用。
+
+         Args:
+             request: 请求对象
+             spider: 爬虫实例
+
+         Returns:
+             None: 继续处理请求
+             Response: 返回响应对象(短路处理)
+             Request: 返回新请求对象(替换原请求)
+         """
+         # 为请求添加随机 User-Agent
+         if 'User-Agent' not in request.headers:
+             ua = random.choice(self.user_agents)
+             request.headers['User-Agent'] = ua
+             self.logger.debug(f"为请求 {request.url} 设置 User-Agent: {ua[:50]}...")
+
+         return None
+
+     def process_response(self, request, response, spider):
+         """
+         在响应被 Spider 处理前调用。
+
+         Args:
+             request: 原始请求对象
+             response: 响应对象
+             spider: 爬虫实例
+
+         Returns:
+             Response: 处理后的响应对象
+         """
+         # 记录响应信息
+         self.logger.info(f"收到响应: {request.url} - 状态码: {response.status_code}")
+
+         # 可以在这里处理特殊状态码
+         if response.status_code == 403:
+             self.logger.warning(f"访问被拒绝: {request.url}")
+
+         return response
+
+     def process_exception(self, request, exception, spider):
+         """
+         在下载或处理过程中发生异常时调用。
+
+         Args:
+             request: 请求对象
+             exception: 异常对象
+             spider: 爬虫实例
+
+         Returns:
+             None: 异常将继续传播
+             Response: 返回响应对象(处理异常)
+             Request: 返回新请求对象(重试请求)
+         """
+         self.logger.error(f"请求异常: {request.url} - {exception}")
+         return None
+
+
+ # ======================== 使用说明 ========================
+ #
+ # 在 settings.py 中启用中间件:
+ # MIDDLEWARES = [
+ #     '{{project_name}}.middlewares.ExampleMiddleware',
+ # ]
+ #
+ # 您可以根据需要添加更多中间件,例如:
+ # 1. 请求处理中间件(修改请求头、设置代理等)
+ # 2. 响应处理中间件(解析、过滤等)
+ # 3. 异常处理中间件(重试、记录等)
+ #
+ # 每个中间件可以实现以下方法:
+ # - process_request: 处理请求
+ # - process_response: 处理响应
+ # - process_exception: 处理异常
+ # ======================== 使用说明 ========================
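The replacement template keeps the same downloader-middleware hooks (process_request, process_response, process_exception) that the removed classes used, so project-specific middleware can still sit next to the generated ExampleMiddleware. Below is a hedged sketch of such an addition, modelled on the proxy example that was commented out in the old template; the ProxyMiddleware name, the PROXY_URL setting, and the use of from_crawler for a middleware are illustrative assumptions, not part of crawlo itself.

# Hypothetical companion middleware, following the same hook interface as the
# ExampleMiddleware template above. The ProxyMiddleware name and the PROXY_URL
# setting are illustrative and not part of crawlo.
from crawlo.utils.log import get_logger


class ProxyMiddleware:
    """Attach a static proxy to outgoing requests (sketch)."""

    def __init__(self, proxy_url):
        self.logger = get_logger(self.__class__.__name__)
        self.proxy_url = proxy_url

    @classmethod
    def from_crawler(cls, crawler):
        # from_crawler mirrors the pattern used by the pipeline templates;
        # whether crawlo calls it for middleware classes is an assumption here.
        return cls(proxy_url=crawler.settings.get('PROXY_URL', 'http://proxy.example.com:8080'))

    def process_request(self, request, spider):
        # Same convention as the template: return None to let the request continue.
        request.meta['proxy'] = self.proxy_url
        return None

    def process_exception(self, request, exception, spider):
        self.logger.error(f"Proxy request failed: {request.url} - {exception}")
        return None


# Enabled alongside the generated template class in settings.py:
# MIDDLEWARES = [
#     '{{project_name}}.middlewares.ExampleMiddleware',
#     '{{project_name}}.middlewares.ProxyMiddleware',
# ]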
crawlo/templates/project/pipelines.py.tmpl
@@ -1,342 +1,98 @@
- # -*- coding: UTF-8 -*-
- """
- {{project_name}}.pipelines
- ==========================
- 数据管道,用于处理 Spider 返回的 Item。
- 例如:清理、验证、去重、保存到数据库等。
-
- 支持异步并发处理和多种存储后端。
- """
-
- import json
- import asyncio
- from typing import Dict, Any
- from datetime import datetime
- from crawlo.exceptions import DropItem
- from crawlo.utils.log import get_logger
-
-
- class ValidationPipeline:
-     """
-     数据验证管道。
-
-     验证必要字段是否存在,过滤无效数据。
-     """
-
-     def __init__(self):
-         self.logger = get_logger(self.__class__.__name__)
-         # 必要字段列表(根据实际需要修改)
-         self.required_fields = ['title', 'url']
-
-     def process_item(self, item, spider):
-         """验证数据项。"""
-         # 检查必要字段
-         for field in self.required_fields:
-             if not item.get(field):
-                 raise DropItem(f"缺少必要字段: {field}")
-
-         # 数据清理
-         if 'title' in item:
-             item['title'] = str(item['title']).strip()
-
-         # 添加时间戳
-         item['crawled_at'] = datetime.now().isoformat()
-
-         self.logger.debug(f"数据验证通过: {item.get('url', 'Unknown URL')}")
-         return item
-
-
- class PrintItemPipeline:
-     """
-     简单的打印管道,用于调试。
-     """
-
-     def __init__(self):
-         self.logger = get_logger(self.__class__.__name__)
-         self.item_count = 0
-
-     def process_item(self, item, spider):
-         """打印数据项。"""
-         self.item_count += 1
-         self.logger.info(f"[第{self.item_count}个数据] {json.dumps(dict(item), ensure_ascii=False, indent=2)}")
-         return item
-
-
- class DuplicatesPipeline:
-     """
-     去重管道。
-
-     基于指定字段进行去重,防止重复数据。
-     """
-
-     def __init__(self):
-         self.logger = get_logger(self.__class__.__name__)
-         self.seen = set()
-         # 去重字段(可以是 'url', 'id', 或其他唯一标识)
-         self.duplicate_field = 'url'
-         self.drop_count = 0
-
-     def process_item(self, item, spider):
-         """检查并去除重复数据。"""
-         identifier = item.get(self.duplicate_field)
-
-         if not identifier:
-             self.logger.warning(f"数据项缺少去重字段 '{self.duplicate_field}',跳过去重检查")
-             return item
-
-         if identifier in self.seen:
-             self.drop_count += 1
-             self.logger.debug(f"发现重复数据: {identifier} (已过滤{self.drop_count}个)")
-             raise DropItem(f"重复数据: {identifier}")
-
-         self.seen.add(identifier)
-         return item
-
-     def close_spider(self, spider):
-         """爬虫结束时输出统计信息。"""
-         self.logger.info(f"去重管道统计: 已过滤{self.drop_count}个重复数据,唯一数据{len(self.seen)}个")
-
-
- class JsonFilesPipeline:
-     """
-     JSON 文件存储管道。
-
-     将每个数据项保存为单独的 JSON 文件。
-     """
-
-     def __init__(self, output_dir='output'):
-         self.logger = get_logger(self.__class__.__name__)
-         self.output_dir = output_dir
-         self.file_count = 0
-
-     @classmethod
-     def from_crawler(cls, crawler):
-         """从爬虫配置创建管道实例。"""
-         output_dir = crawler.settings.get('JSON_OUTPUT_DIR', 'output')
-         return cls(output_dir=output_dir)
-
-     def open_spider(self, spider):
-         """爬虫启动时创建输出目录。"""
-         import os
-         os.makedirs(self.output_dir, exist_ok=True)
-         self.logger.info(f"JSON 文件将保存到: {self.output_dir}")
-
-     def process_item(self, item, spider):
-         """将数据项保存为 JSON 文件。"""
-         self.file_count += 1
-         filename = f"{spider.name}_{self.file_count:06d}.json"
-         filepath = f"{self.output_dir}/{filename}"
-
-         with open(filepath, 'w', encoding='utf-8') as f:
-             json.dump(dict(item), f, ensure_ascii=False, indent=2)
-
-         self.logger.debug(f"已保存: {filepath}")
-         return item
-
-
- class DatabasePipeline:
-     """
-     数据库存储管道示例。
-
-     支持 MySQL 和 MongoDB,可根据需要选择。
-     """
-
-     def __init__(self, database_type='mysql'):
-         self.logger = get_logger(self.__class__.__name__)
-         self.database_type = database_type
-         self.connection = None
-         self.batch_items = []
-         self.batch_size = 100  # 批量写入大小
-
-     @classmethod
-     def from_crawler(cls, crawler):
-         """从爬虫配置创建管道实例。"""
-         db_type = crawler.settings.get('DATABASE_TYPE', 'mysql')
-         return cls(database_type=db_type)
-
-     async def open_spider(self, spider):
-         """爬虫启动时连接数据库。"""
-         if self.database_type == 'mysql':
-             await self._connect_mysql(spider)
-         elif self.database_type == 'mongodb':
-             await self._connect_mongodb(spider)
-         else:
-             raise ValueError(f"不支持的数据库类型: {self.database_type}")
-
-     async def _connect_mysql(self, spider):
-         """连接 MySQL 数据库。"""
-         try:
-             import aiomysql
-
-             settings = spider.crawler.settings
-             self.connection = await aiomysql.connect(
-                 host=settings.get('MYSQL_HOST', '127.0.0.1'),
-                 port=settings.get('MYSQL_PORT', 3306),
-                 user=settings.get('MYSQL_USER', 'root'),
-                 password=settings.get('MYSQL_PASSWORD', ''),
-                 db=settings.get('MYSQL_DB', '{{project_name}}'),
-                 charset='utf8mb4',
-                 autocommit=True
-             )
-
-             # 创建表(如果不存在)
-             await self._create_mysql_table(spider)
-             self.logger.info("已连接到 MySQL 数据库")
-
-         except ImportError:
-             self.logger.error("缺少 aiomysql 依赖,请安装: pip install aiomysql")
-             raise
-         except Exception as e:
-             self.logger.error(f"MySQL 连接失败: {e}")
-             raise
-
-     async def _connect_mongodb(self, spider):
-         """连接 MongoDB 数据库。"""
-         try:
-             from motor.motor_asyncio import AsyncIOMotorClient
-
-             settings = spider.crawler.settings
-             mongo_uri = settings.get('MONGO_URI', 'mongodb://localhost:27017')
-
-             self.connection = AsyncIOMotorClient(mongo_uri)
-             self.database = self.connection[settings.get('MONGO_DATABASE', '{{project_name}}_db')]
-             self.collection = self.database[settings.get('MONGO_COLLECTION', '{{project_name}}_items')]
-
-             self.logger.info("已连接到 MongoDB 数据库")
-
-         except ImportError:
-             self.logger.error("缺少 motor 依赖,请安装: pip install motor")
-             raise
-         except Exception as e:
-             self.logger.error(f"MongoDB 连接失败: {e}")
-             raise
-
-     async def _create_mysql_table(self, spider):
-         """创建 MySQL 表结构。"""
-         table_name = spider.crawler.settings.get('MYSQL_TABLE', '{{project_name}}_data')
-
-         create_sql = f"""
-         CREATE TABLE IF NOT EXISTS `{table_name}` (
-             `id` bigint AUTO_INCREMENT PRIMARY KEY,
-             `title` varchar(500) DEFAULT NULL,
-             `url` varchar(1000) NOT NULL,
-             `content` text DEFAULT NULL,
-             `crawled_at` datetime DEFAULT CURRENT_TIMESTAMP,
-             `spider_name` varchar(100) DEFAULT NULL,
-             `extra_data` json DEFAULT NULL,
-             UNIQUE KEY `unique_url` (`url`(255)),
-             INDEX `idx_spider_name` (`spider_name`),
-             INDEX `idx_crawled_at` (`crawled_at`)
-         ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-         """
-
-         async with self.connection.cursor() as cursor:
-             await cursor.execute(create_sql)
-             self.logger.info(f"表 '{table_name}' 准备就绪")
-
-     async def process_item(self, item, spider):
-         """处理数据项(批量存储)。"""
-         self.batch_items.append(dict(item))
-
-         # 批量存储
-         if len(self.batch_items) >= self.batch_size:
-             await self._save_batch(spider)
-
-         return item
-
-     async def _save_batch(self, spider):
-         """批量保存数据。"""
-         if not self.batch_items:
-             return
-
-         try:
-             if self.database_type == 'mysql':
-                 await self._save_to_mysql(spider)
-             elif self.database_type == 'mongodb':
-                 await self._save_to_mongodb(spider)
-
-             self.logger.info(f"批量保存 {len(self.batch_items)} 条数据")
-             self.batch_items.clear()
-
-         except Exception as e:
-             self.logger.error(f"批量保存失败: {e}")
-             # 可以选择重试或记录失败数据
-
-     async def _save_to_mysql(self, spider):
-         """保存到 MySQL。"""
-         table_name = spider.crawler.settings.get('MYSQL_TABLE', '{{project_name}}_data')
-
-         insert_sql = f"""
-         INSERT INTO `{table_name}`
-         (`title`, `url`, `content`, `spider_name`, `extra_data`)
-         VALUES (%s, %s, %s, %s, %s)
-         ON DUPLICATE KEY UPDATE
-         `title` = VALUES(`title`),
-         `content` = VALUES(`content`),
-         `crawled_at` = CURRENT_TIMESTAMP
-         """
-
-         async with self.connection.cursor() as cursor:
-             batch_data = []
-             for item in self.batch_items:
-                 # 提取标准字段
-                 title = item.get('title', '')[:500]  # 限制长度
-                 url = item.get('url', '')
-                 content = item.get('content', '')
-
-                 # 其他字段作为 JSON 存储
-                 extra_fields = {k: v for k, v in item.items()
-                                 if k not in ['title', 'url', 'content']}
-                 extra_data = json.dumps(extra_fields, ensure_ascii=False) if extra_fields else None
-
-                 batch_data.append((title, url, content, spider.name, extra_data))
-
-             await cursor.executemany(insert_sql, batch_data)
-
-     async def _save_to_mongodb(self, spider):
-         """保存到 MongoDB。"""
-         # 为每个数据项添加 spider_name
-         for item in self.batch_items:
-             item['spider_name'] = spider.name
-
-         # 批量插入
-         await self.collection.insert_many(self.batch_items)
-
-     async def close_spider(self, spider):
-         """爬虫结束时保存剩余数据并关闭连接。"""
-         # 保存剩余数据
-         if self.batch_items:
-             await self._save_batch(spider)
-
-         # 关闭连接
-         if self.connection:
-             if self.database_type == 'mysql':
-                 self.connection.close()
-             elif self.database_type == 'mongodb':
-                 self.connection.close()
-
-         self.logger.info("数据库连接已关闭")
-
-
- # ======================== 使用说明 ========================
- #
- # 在 settings.py 中启用管道:
- # PIPELINES = [
- #     '{{project_name}}.pipelines.ValidationPipeline',    # 数据验证
- #     '{{project_name}}.pipelines.DuplicatesPipeline',    # 去重过滤
- #     '{{project_name}}.pipelines.PrintItemPipeline',     # 打印输出(调试)
- #     '{{project_name}}.pipelines.JsonFilesPipeline',     # JSON 文件存储
- #     '{{project_name}}.pipelines.DatabasePipeline',      # 数据库存储
- # ]
- #
- # Crawlo 框架提供了多种内置的去重管道:
- # 1. crawlo.pipelines.MemoryDedupPipeline - 内存去重(单机模式默认)
- # 2. crawlo.pipelines.RedisDedupPipeline - Redis去重(分布式模式默认)
- # 3. crawlo.pipelines.BloomDedupPipeline - Bloom Filter去重(大规模数据)
- # 4. crawlo.pipelines.DatabaseDedupPipeline - 数据库去重(持久化)
- #
- # 相关配置:
- # JSON_OUTPUT_DIR = 'output'             # JSON 文件输出目录
- # DATABASE_TYPE = 'mysql'                # 数据库类型: mysql/mongodb
- # MYSQL_TABLE = '{{project_name}}_data'  # MySQL 表名
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}}.pipelines
+ ==========================
+ 数据管道,用于处理 Spider 返回的 Item。
+ 例如:清理、验证、去重、保存到数据库等。
+
+ 这是一个简单的示例管道,您可以根据需要添加更多管道。
+ """
+
+ import json
+ from datetime import datetime
+ from crawlo.exceptions import DropItem
+ from crawlo.utils.log import get_logger
+
+
+ class ExamplePipeline:
+     """
+     示例管道,演示如何处理数据项。
+
+     此管道会:
+     1. 验证必要字段
+     2. 清理数据
+     3. 添加时间戳
+     4. 记录处理日志
+     """
+
+     def __init__(self):
+         self.logger = get_logger(self.__class__.__name__)
+         self.item_count = 0
+
+     def process_item(self, item, spider):
+         """
+         处理数据项。
+
+         Args:
+             item: 要处理的数据项
+             spider: 爬虫实例
+
+         Returns:
+             处理后的数据项
+
+         Raises:
+             DropItem: 如果数据项无效则抛出此异常
+         """
+         # 验证必要字段
+         if not item.get('title') or not item.get('url'):
+             raise DropItem("缺少必要字段: title 或 url")
+
+         # 数据清理
+         item['title'] = str(item['title']).strip()
+
+         # 添加处理时间戳
+         item['processed_at'] = datetime.now().isoformat()
+
+         # 计数器
+         self.item_count += 1
+
+         # 记录日志
+         self.logger.info(f"处理第 {self.item_count} 个数据项: {item['title']}")
+
+         return item
+
+     def open_spider(self, spider):
+         """
+         爬虫启动时调用。
+
+         Args:
+             spider: 爬虫实例
+         """
+         self.logger.info(f"管道已启动,准备处理爬虫 '{spider.name}' 的数据")
+
+     def close_spider(self, spider):
+         """
+         爬虫关闭时调用。
+
+         Args:
+             spider: 爬虫实例
+         """
+         self.logger.info(f"管道已关闭,共处理了 {self.item_count} 个数据项")
+
+
+ # ======================== 使用说明 ========================
+ #
+ # 在 settings.py 中启用管道:
+ # PIPELINES = [
+ #     '{{project_name}}.pipelines.ExamplePipeline',
+ # ]
+ #
+ # 您可以根据需要添加更多管道,例如:
+ # 1. 数据验证管道
+ # 2. 去重管道
+ # 3. 数据存储管道(数据库、文件等)
+ # 4. 数据转换管道
+ #
+ # 每个管道都应该实现 process_item 方法,
+ # 可选实现 open_spider 和 close_spider 方法。
  # ======================== 使用说明 ========================
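The new pipelines template trims the generated file down to a single ExamplePipeline and points users at the framework's built-in dedup pipelines instead of shipping storage code. As the usage notes in the template state, an additional pipeline only needs process_item, with open_spider and close_spider optional. Below is a hedged sketch in that style, writing each item as one JSON line to a local file; the JsonLinesPipeline name and the JSONL_PATH setting are illustrative assumptions, not part of crawlo.

# Hypothetical storage pipeline following the interface described in the template's
# usage notes. The class name and the JSONL_PATH setting are illustrative.
import json

from crawlo.utils.log import get_logger


class JsonLinesPipeline:
    """Append each item as one JSON line to a local file (sketch)."""

    def __init__(self, path='items.jsonl'):
        self.logger = get_logger(self.__class__.__name__)
        self.path = path
        self.file = None

    @classmethod
    def from_crawler(cls, crawler):
        # from_crawler mirrors the pattern used by the old JsonFilesPipeline template.
        return cls(path=crawler.settings.get('JSONL_PATH', 'items.jsonl'))

    def open_spider(self, spider):
        self.file = open(self.path, 'a', encoding='utf-8')
        self.logger.info(f"Writing items from '{spider.name}' to {self.path}")

    def process_item(self, item, spider):
        # One JSON object per line; ensure_ascii=False matches the template's own usage.
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        if self.file:
            self.file.close()
        self.logger.info(f"Closed {self.path}")


# Enabled after the generated ExamplePipeline in settings.py:
# PIPELINES = [
#     '{{project_name}}.pipelines.ExamplePipeline',
#     '{{project_name}}.pipelines.JsonLinesPipeline',
# ]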