crawlo 1.1.1-py3-none-any.whl → 1.1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (128)
  1. crawlo/__init__.py +34 -33
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +152 -126
  7. crawlo/commands/list.py +156 -147
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -111
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +187 -0
  12. crawlo/config.py +280 -0
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -158
  15. crawlo/core/enhanced_engine.py +190 -0
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +166 -57
  18. crawlo/crawler.py +1028 -495
  19. crawlo/downloader/__init__.py +242 -78
  20. crawlo/downloader/aiohttp_downloader.py +212 -199
  21. crawlo/downloader/cffi_downloader.py +251 -241
  22. crawlo/downloader/httpx_downloader.py +259 -246
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +82 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -37
  30. crawlo/filters/aioredis_filter.py +242 -150
  31. crawlo/filters/memory_filter.py +269 -202
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -245
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -90
  45. crawlo/mode_manager.py +201 -0
  46. crawlo/network/__init__.py +21 -7
  47. crawlo/network/request.py +311 -203
  48. crawlo/network/response.py +271 -166
  49. crawlo/pipelines/__init__.py +22 -13
  50. crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
  51. crawlo/pipelines/console_pipeline.py +39 -39
  52. crawlo/pipelines/csv_pipeline.py +317 -0
  53. crawlo/pipelines/database_dedup_pipeline.py +225 -0
  54. crawlo/pipelines/json_pipeline.py +219 -0
  55. crawlo/pipelines/memory_dedup_pipeline.py +116 -0
  56. crawlo/pipelines/mongo_pipeline.py +116 -116
  57. crawlo/pipelines/mysql_pipeline.py +195 -195
  58. crawlo/pipelines/pipeline_manager.py +56 -56
  59. crawlo/pipelines/redis_dedup_pipeline.py +163 -0
  60. crawlo/project.py +153 -153
  61. crawlo/queue/__init__.py +0 -0
  62. crawlo/queue/pqueue.py +37 -0
  63. crawlo/queue/queue_manager.py +308 -0
  64. crawlo/queue/redis_priority_queue.py +209 -0
  65. crawlo/settings/__init__.py +7 -7
  66. crawlo/settings/default_settings.py +245 -167
  67. crawlo/settings/setting_manager.py +99 -99
  68. crawlo/spider/__init__.py +639 -129
  69. crawlo/stats_collector.py +59 -59
  70. crawlo/subscriber.py +106 -106
  71. crawlo/task_manager.py +30 -27
  72. crawlo/templates/crawlo.cfg.tmpl +10 -10
  73. crawlo/templates/project/__init__.py.tmpl +3 -3
  74. crawlo/templates/project/items.py.tmpl +17 -17
  75. crawlo/templates/project/middlewares.py.tmpl +87 -76
  76. crawlo/templates/project/pipelines.py.tmpl +342 -64
  77. crawlo/templates/project/run.py.tmpl +252 -0
  78. crawlo/templates/project/settings.py.tmpl +251 -54
  79. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  80. crawlo/templates/spider/spider.py.tmpl +178 -32
  81. crawlo/utils/__init__.py +7 -7
  82. crawlo/utils/controlled_spider_mixin.py +440 -0
  83. crawlo/utils/date_tools.py +233 -233
  84. crawlo/utils/db_helper.py +343 -343
  85. crawlo/utils/func_tools.py +82 -82
  86. crawlo/utils/large_scale_config.py +287 -0
  87. crawlo/utils/large_scale_helper.py +344 -0
  88. crawlo/utils/log.py +128 -128
  89. crawlo/utils/queue_helper.py +176 -0
  90. crawlo/utils/request.py +267 -267
  91. crawlo/utils/request_serializer.py +220 -0
  92. crawlo/utils/spider_loader.py +62 -62
  93. crawlo/utils/system.py +11 -11
  94. crawlo/utils/tools.py +4 -4
  95. crawlo/utils/url.py +39 -39
  96. crawlo-1.1.3.dist-info/METADATA +635 -0
  97. crawlo-1.1.3.dist-info/RECORD +113 -0
  98. examples/__init__.py +7 -7
  99. examples/controlled_spider_example.py +205 -0
  100. tests/__init__.py +7 -7
  101. tests/test_final_validation.py +154 -0
  102. tests/test_proxy_health_check.py +32 -32
  103. tests/test_proxy_middleware_integration.py +136 -136
  104. tests/test_proxy_providers.py +56 -56
  105. tests/test_proxy_stats.py +19 -19
  106. tests/test_proxy_strategies.py +59 -59
  107. tests/test_redis_config.py +29 -0
  108. tests/test_redis_queue.py +225 -0
  109. tests/test_request_serialization.py +71 -0
  110. tests/test_scheduler.py +242 -0
  111. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  112. crawlo/utils/pqueue.py +0 -174
  113. crawlo-1.1.1.dist-info/METADATA +0 -220
  114. crawlo-1.1.1.dist-info/RECORD +0 -100
  115. examples/baidu_spider/__init__.py +0 -7
  116. examples/baidu_spider/demo.py +0 -94
  117. examples/baidu_spider/items.py +0 -46
  118. examples/baidu_spider/middleware.py +0 -49
  119. examples/baidu_spider/pipeline.py +0 -55
  120. examples/baidu_spider/run.py +0 -27
  121. examples/baidu_spider/settings.py +0 -121
  122. examples/baidu_spider/spiders/__init__.py +0 -7
  123. examples/baidu_spider/spiders/bai_du.py +0 -61
  124. examples/baidu_spider/spiders/miit.py +0 -159
  125. examples/baidu_spider/spiders/sina.py +0 -79
  126. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
  127. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
  128. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
crawlo/pipelines/console_pipeline.py
@@ -1,40 +1,40 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-from typing import Dict, Any
-
-from crawlo import Item
-from crawlo.spider import Spider
-from crawlo.utils.log import get_logger
-
-
-class ConsolePipeline:
-    """Pipeline that writes Item contents to the console log"""
-
-    def __init__(self, log_level: str = "DEBUG"):
-        self.logger = get_logger(self.__class__.__name__, log_level)
-
-    @classmethod
-    def from_crawler(cls, crawler):
-        """Create a pipeline instance from the crawler"""
-        return cls(
-            log_level=crawler.settings.get('LOG_LEVEL', 'DEBUG')
-        )
-
-    async def process_item(self, item: Item, spider: Spider) -> Item:
-        """Process the Item and write it to the log"""
-        try:
-            item_dict = self._convert_to_serializable(item)
-            self.logger.info(f"Item processed: {item_dict}")
-            return item
-        except Exception as e:
-            self.logger.error(f"Error processing item: {e}", exc_info=True)
-            raise
-
-    @staticmethod
-    def _convert_to_serializable(item: Item) -> Dict[str, Any]:
-        """Convert the Item into a serializable dict"""
-        try:
-            return item.to_dict()
-        except AttributeError:
-            # Fall back for Item implementations without a to_dict method
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+from typing import Dict, Any
+
+from crawlo import Item
+from crawlo.spider import Spider
+from crawlo.utils.log import get_logger
+
+
+class ConsolePipeline:
+    """Pipeline that writes Item contents to the console log"""
+
+    def __init__(self, log_level: str = "DEBUG"):
+        self.logger = get_logger(self.__class__.__name__, log_level)
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        """Create a pipeline instance from the crawler"""
+        return cls(
+            log_level=crawler.settings.get('LOG_LEVEL', 'DEBUG')
+        )
+
+    async def process_item(self, item: Item, spider: Spider) -> Item:
+        """Process the Item and write it to the log"""
+        try:
+            item_dict = self._convert_to_serializable(item)
+            self.logger.info(f"Item processed: {item_dict}")
+            return item
+        except Exception as e:
+            self.logger.error(f"Error processing item: {e}", exc_info=True)
+            raise
+
+    @staticmethod
+    def _convert_to_serializable(item: Item) -> Dict[str, Any]:
+        """Convert the Item into a serializable dict"""
+        try:
+            return item.to_dict()
+        except AttributeError:
+            # Fall back for Item implementations without a to_dict method
             return dict(item)
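
The hunk above is a whitespace-only rewrite of console_pipeline.py (39 lines removed and re-added unchanged). The one substantive pattern it carries is the to_dict() fallback at the end. A minimal standalone sketch of that fallback, in plain Python with a hypothetical LegacyItem stand-in (no crawlo imports assumed):

from typing import Any, Dict

def convert_to_serializable(item: Any) -> Dict[str, Any]:
    # Same pattern as ConsolePipeline._convert_to_serializable: prefer the
    # item's own to_dict(), fall back to dict() for Item implementations
    # that do not define it.
    try:
        return item.to_dict()
    except AttributeError:
        return dict(item)

class LegacyItem(dict):
    # Hypothetical Item-like class without a to_dict() method.
    pass

print(convert_to_serializable(LegacyItem(title='hello')))  # {'title': 'hello'}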
crawlo/pipelines/csv_pipeline.py
@@ -0,0 +1,317 @@
+# -*- coding: utf-8 -*-
+import csv
+import asyncio
+from pathlib import Path
+from typing import Optional, List
+from datetime import datetime
+from crawlo.utils.log import get_logger
+from crawlo.exceptions import ItemDiscard
+
+
+class CsvPipeline:
+    """CSV file output pipeline"""
+
+    def __init__(self, crawler):
+        self.crawler = crawler
+        self.settings = crawler.settings
+        self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+        # Output file path
+        self.file_path = self._get_file_path()
+        self.file_handle = None
+        self.csv_writer = None
+        self.headers_written = False
+        self.lock = asyncio.Lock()  # async lock to keep writes safe under concurrency
+
+        # CSV options
+        self.delimiter = self.settings.get('CSV_DELIMITER', ',')
+        self.quotechar = self.settings.get('CSV_QUOTECHAR', '"')
+        self.include_headers = self.settings.get_bool('CSV_INCLUDE_HEADERS', True)
+
+        # Register the close event
+        crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        return cls(crawler)
+
+    def _get_file_path(self) -> Path:
+        """Resolve the output file path"""
+        file_path = (
+            self.settings.get('CSV_FILE') or
+            getattr(self.crawler.spider, 'csv_file', None) or
+            f"output/{self.crawler.spider.name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
+        )
+
+        path = Path(file_path)
+        path.parent.mkdir(parents=True, exist_ok=True)
+        return path
+
+    async def _ensure_file_open(self):
+        """Open the output file if it is not already open"""
+        if self.file_handle is None:
+            self.file_handle = open(self.file_path, 'w', newline='', encoding='utf-8')
+            self.csv_writer = csv.writer(
+                self.file_handle,
+                delimiter=self.delimiter,
+                quotechar=self.quotechar,
+                quoting=csv.QUOTE_MINIMAL
+            )
+            self.logger.info(f"CSV file created: {self.file_path}")
+
+    async def _write_headers(self, item_dict: dict):
+        """Write the CSV header row"""
+        if not self.headers_written and self.include_headers:
+            headers = list(item_dict.keys())
+            self.csv_writer.writerow(headers)
+            self.headers_written = True
+            self.logger.debug(f"CSV headers written: {headers}")
+
+    async def process_item(self, item, spider) -> Optional[dict]:
+        """Core item-processing method"""
+        try:
+            async with self.lock:
+                await self._ensure_file_open()
+
+                # Convert to a dict
+                item_dict = dict(item)
+
+                # Write the header row (first item only)
+                await self._write_headers(item_dict)
+
+                # Write the data row
+                values = [str(v) if v is not None else '' for v in item_dict.values()]
+                self.csv_writer.writerow(values)
+                self.file_handle.flush()  # flush to disk immediately
+
+                # Stats
+                self.crawler.stats.inc_value('csv_pipeline/items_written')
+                self.logger.debug(f"Wrote CSV row: {len(item_dict)} fields")
+
+                return item
+
+        except Exception as e:
+            self.crawler.stats.inc_value('csv_pipeline/items_failed')
+            self.logger.error(f"CSV write failed: {e}")
+            raise ItemDiscard(f"CSV Pipeline processing failed: {e}")
+
+    async def spider_closed(self):
+        """Clean up resources when the spider closes"""
+        if self.file_handle:
+            self.file_handle.close()
+            self.logger.info(f"CSV file closed: {self.file_path}")
+
+
+class CsvDictPipeline:
+    """CSV dict-writer pipeline (uses DictWriter, supports field mapping)"""
+
+    def __init__(self, crawler):
+        self.crawler = crawler
+        self.settings = crawler.settings
+        self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+        self.file_path = self._get_file_path()
+        self.file_handle = None
+        self.csv_writer = None
+        self.fieldnames = None
+        self.headers_written = False
+        self.lock = asyncio.Lock()
+
+        # Options
+        self.delimiter = self.settings.get('CSV_DELIMITER', ',')
+        self.quotechar = self.settings.get('CSV_QUOTECHAR', '"')
+        self.include_headers = self.settings.get_bool('CSV_INCLUDE_HEADERS', True)
+        self.extrasaction = self.settings.get('CSV_EXTRASACTION', 'ignore')  # ignore, raise
+
+        crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        return cls(crawler)
+
+    def _get_file_path(self) -> Path:
+        """Resolve the output file path"""
+        file_path = (
+            self.settings.get('CSV_DICT_FILE') or
+            getattr(self.crawler.spider, 'csv_dict_file', None) or
+            f"output/{self.crawler.spider.name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_dict.csv"
+        )
+
+        path = Path(file_path)
+        path.parent.mkdir(parents=True, exist_ok=True)
+        return path
+
+    def _get_fieldnames(self, item_dict: dict) -> List[str]:
+        """Determine the field-name list"""
+        # Prefer field names from settings
+        configured_fields = self.settings.get('CSV_FIELDNAMES')
+        if configured_fields:
+            return configured_fields if isinstance(configured_fields, list) else configured_fields.split(',')
+
+        # Then field names defined on the spider
+        spider_fields = getattr(self.crawler.spider, 'csv_fieldnames', None)
+        if spider_fields:
+            return spider_fields if isinstance(spider_fields, list) else spider_fields.split(',')
+
+        # Fall back to the item's own keys
+        return list(item_dict.keys())
+
+    async def _ensure_file_open(self, item_dict: dict):
+        """Open the output file if it is not already open"""
+        if self.file_handle is None:
+            self.fieldnames = self._get_fieldnames(item_dict)
+
+            self.file_handle = open(self.file_path, 'w', newline='', encoding='utf-8')
+            self.csv_writer = csv.DictWriter(
+                self.file_handle,
+                fieldnames=self.fieldnames,
+                delimiter=self.delimiter,
+                quotechar=self.quotechar,
+                quoting=csv.QUOTE_MINIMAL,
+                extrasaction=self.extrasaction
+            )
+
+            # Write the header row
+            if self.include_headers:
+                self.csv_writer.writeheader()
+                self.headers_written = True
+
+            self.logger.info(f"CSV dict file created: {self.file_path}, fields: {self.fieldnames}")
+
+    async def process_item(self, item, spider) -> Optional[dict]:
+        """Item-processing method"""
+        try:
+            async with self.lock:
+                item_dict = dict(item)
+                await self._ensure_file_open(item_dict)
+
+                # Write the data row
+                self.csv_writer.writerow(item_dict)
+                self.file_handle.flush()
+
+                self.crawler.stats.inc_value('csv_dict_pipeline/items_written')
+                self.logger.debug(f"Wrote CSV dict row, field count: {len(item_dict)}")
+
+                return item
+
+        except Exception as e:
+            self.crawler.stats.inc_value('csv_dict_pipeline/items_failed')
+            self.logger.error(f"CSV dict write failed: {e}")
+            raise ItemDiscard(f"CSV Dict Pipeline processing failed: {e}")
+
+    async def spider_closed(self):
+        """Resource cleanup"""
+        if self.file_handle:
+            self.file_handle.close()
+            self.logger.info(f"CSV dict file closed: {self.file_path}")
+
+
+class CsvBatchPipeline:
+    """CSV batch-writing pipeline (buffers rows in memory and writes in batches for better performance)"""
+
+    def __init__(self, crawler):
+        self.crawler = crawler
+        self.settings = crawler.settings
+        self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+        self.file_path = self._get_file_path()
+        self.file_handle = None
+        self.csv_writer = None
+        self.batch_buffer = []
+        self.headers_written = False
+        self.lock = asyncio.Lock()
+
+        # Batch options
+        self.batch_size = self.settings.get_int('CSV_BATCH_SIZE', 100)
+        self.delimiter = self.settings.get('CSV_DELIMITER', ',')
+        self.quotechar = self.settings.get('CSV_QUOTECHAR', '"')
+        self.include_headers = self.settings.get_bool('CSV_INCLUDE_HEADERS', True)
+
+        crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        return cls(crawler)
+
+    def _get_file_path(self) -> Path:
+        """Resolve the output file path"""
+        file_path = (
+            self.settings.get('CSV_BATCH_FILE') or
+            getattr(self.crawler.spider, 'csv_batch_file', None) or
+            f"output/{self.crawler.spider.name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_batch.csv"
+        )
+
+        path = Path(file_path)
+        path.parent.mkdir(parents=True, exist_ok=True)
+        return path
+
+    async def _ensure_file_open(self):
+        """Open the output file if it is not already open"""
+        if self.file_handle is None:
+            self.file_handle = open(self.file_path, 'w', newline='', encoding='utf-8')
+            self.csv_writer = csv.writer(
+                self.file_handle,
+                delimiter=self.delimiter,
+                quotechar=self.quotechar,
+                quoting=csv.QUOTE_MINIMAL
+            )
+            self.logger.info(f"CSV batch file created: {self.file_path}")
+
+    async def _flush_batch(self):
+        """Flush the batch buffer to the file"""
+        if not self.batch_buffer:
+            return
+
+        await self._ensure_file_open()
+
+        for row in self.batch_buffer:
+            self.csv_writer.writerow(row)
+
+        self.file_handle.flush()
+        items_count = len(self.batch_buffer)
+        self.batch_buffer.clear()
+
+        self.crawler.stats.inc_value('csv_batch_pipeline/batches_written')
+        self.crawler.stats.inc_value('csv_batch_pipeline/items_written', count=items_count)
+        self.logger.info(f"Batch-wrote {items_count} rows to the CSV file")
+
+    async def process_item(self, item, spider) -> Optional[dict]:
+        """Item-processing method"""
+        try:
+            async with self.lock:
+                item_dict = dict(item)
+
+                # Buffer the header row (first item only)
+                if not self.headers_written and self.include_headers:
+                    headers = list(item_dict.keys())
+                    self.batch_buffer.append(headers)
+                    self.headers_written = True
+
+                # Append the data row to the buffer
+                values = [str(v) if v is not None else '' for v in item_dict.values()]
+                self.batch_buffer.append(values)
+
+                # Flush the buffer once it reaches the batch size
+                if len(self.batch_buffer) >= self.batch_size:
+                    await self._flush_batch()
+
+                return item
+
+        except Exception as e:
+            self.crawler.stats.inc_value('csv_batch_pipeline/items_failed')
+            self.logger.error(f"CSV batch processing failed: {e}")
+            raise ItemDiscard(f"CSV Batch Pipeline processing failed: {e}")
+
+    async def spider_closed(self):
+        """Flush any remaining buffered rows on close"""
+        try:
+            # Flush the remaining batched rows
+            async with self.lock:
+                await self._flush_batch()
+
+            if self.file_handle:
+                self.file_handle.close()
+                self.logger.info(f"CSV batch file closed: {self.file_path}")
+
+        except Exception as e:
+            self.logger.error(f"Error closing the CSV batch pipeline: {e}")
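
All three pipelines pull their options from the CSV_* settings keys shown above (CSV_FILE, CSV_DICT_FILE, CSV_BATCH_FILE, CSV_DELIMITER, CSV_QUOTECHAR, CSV_INCLUDE_HEADERS, CSV_FIELDNAMES, CSV_EXTRASACTION, CSV_BATCH_SIZE), falling back to spider attributes and then to a timestamped path under output/. A hedged sketch of a project settings fragment; the CSV_* names come from the diff, while the PIPELINES registration key is an assumption about crawlo's project layout, not a confirmed API:

# Hypothetical settings.py fragment.
# PIPELINES is an assumed registration key; the CSV_* keys below are the ones
# the pipelines actually read via settings.get()/get_bool()/get_int().
PIPELINES = [
    'crawlo.pipelines.csv_pipeline.CsvPipeline',
]
CSV_FILE = 'output/results.csv'  # else spider.csv_file, then a timestamped default
CSV_DELIMITER = ','
CSV_QUOTECHAR = '"'
CSV_INCLUDE_HEADERS = True
CSV_BATCH_SIZE = 100             # consulted only by CsvBatchPipeline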
crawlo/pipelines/database_dedup_pipeline.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+"""
+Database-backed item deduplication pipeline
+===========================================
+Provides persistent deduplication for long-running or resumable (checkpoint-restart) crawls.
+
+Features:
+- Persistent storage: dedup state survives spider restarts
+- High reliability: database transactions guarantee consistency
+- Broad applicability: supports multiple database backends
+- Extensible: supports custom table schemas and fields
+"""
+
+import hashlib
+from typing import Dict, Any, Optional
+import aiomysql
+
+from crawlo import Item
+from crawlo.spider import Spider
+from crawlo.utils.log import get_logger
+from crawlo.exceptions import DropItem
+
+
+class DatabaseDedupPipeline:
+    """Database-backed item deduplication pipeline"""
+
+    def __init__(
+        self,
+        db_host: str = 'localhost',
+        db_port: int = 3306,
+        db_user: str = 'root',
+        db_password: str = '',
+        db_name: str = 'crawlo',
+        table_name: str = 'item_fingerprints',
+        log_level: str = "INFO"
+    ):
+        """
+        Initialize the database dedup pipeline
+
+        :param db_host: database host
+        :param db_port: database port
+        :param db_user: database user
+        :param db_password: database password
+        :param db_name: database name
+        :param table_name: table that stores the fingerprints
+        :param log_level: log level
+        """
+        self.logger = get_logger(self.__class__.__name__, log_level)
+
+        # Database connection parameters
+        self.db_config = {
+            'host': db_host,
+            'port': db_port,
+            'user': db_user,
+            'password': db_password,
+            'db': db_name,
+            'autocommit': False
+        }
+
+        self.table_name = table_name
+        self.dropped_count = 0
+        self.connection = None
+        self.pool = None
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        """Create a pipeline instance from the crawler settings"""
+        settings = crawler.settings
+
+        return cls(
+            db_host=settings.get('DB_HOST', 'localhost'),
+            db_port=settings.getint('DB_PORT', 3306),
+            db_user=settings.get('DB_USER', 'root'),
+            db_password=settings.get('DB_PASSWORD', ''),
+            db_name=settings.get('DB_NAME', 'crawlo'),
+            table_name=settings.get('DB_DEDUP_TABLE', 'item_fingerprints'),
+            log_level=settings.get('LOG_LEVEL', 'INFO')
+        )
+
+    async def open_spider(self, spider: Spider) -> None:
+        """
+        Initialize the database connection when the spider starts
+
+        :param spider: spider instance
+        """
+        try:
+            # Create the connection pool
+            self.pool = await aiomysql.create_pool(
+                **self.db_config,
+                minsize=2,
+                maxsize=10
+            )
+
+            # Create the dedup table if it does not exist
+            await self._create_dedup_table()
+
+            self.logger.info(f"Database dedup pipeline initialized: {self.db_config['host']}:{self.db_config['port']}/{self.db_config['db']}.{self.table_name}")
+        except Exception as e:
+            self.logger.error(f"Database dedup pipeline initialization failed: {e}")
+            raise RuntimeError(f"Database dedup pipeline initialization failed: {e}")
+
+    async def _create_dedup_table(self) -> None:
+        """Create the dedup table"""
+        create_table_sql = f"""
+        CREATE TABLE IF NOT EXISTS `{self.table_name}` (
+            `id` BIGINT AUTO_INCREMENT PRIMARY KEY,
+            `fingerprint` VARCHAR(64) NOT NULL UNIQUE,
+            `created_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+            INDEX `idx_fingerprint` (`fingerprint`)
+        ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
+        """
+
+        async with self.pool.acquire() as conn:
+            async with conn.cursor() as cursor:
+                await cursor.execute(create_table_sql)
+                await conn.commit()
+
+    async def process_item(self, item: Item, spider: Spider) -> Item:
+        """
+        Process an item, checking it against the dedup store
+
+        :param item: item to process
+        :param spider: spider instance
+        :return: the processed item, or raises DropItem
+        """
+        try:
+            # Compute the item fingerprint
+            fingerprint = self._generate_item_fingerprint(item)
+
+            # Check whether the fingerprint already exists
+            exists = await self._check_fingerprint_exists(fingerprint)
+
+            if exists:
+                # Already seen: drop the item
+                self.dropped_count += 1
+                self.logger.debug(f"Dropping duplicate item: {fingerprint[:20]}...")
+                raise DropItem(f"Duplicate item: {fingerprint}")
+            else:
+                # New item: record its fingerprint
+                await self._insert_fingerprint(fingerprint)
+                self.logger.debug(f"Processing new item: {fingerprint[:20]}...")
+                return item
+
+        except Exception as e:
+            self.logger.error(f"Error while processing item: {e}")
+            # Keep processing on errors to avoid losing data
+            return item
+
+    async def _check_fingerprint_exists(self, fingerprint: str) -> bool:
+        """
+        Check whether a fingerprint already exists
+
+        :param fingerprint: item fingerprint
+        :return: whether it exists
+        """
+        check_sql = f"SELECT 1 FROM `{self.table_name}` WHERE `fingerprint` = %s LIMIT 1"
+
+        async with self.pool.acquire() as conn:
+            async with conn.cursor() as cursor:
+                await cursor.execute(check_sql, (fingerprint,))
+                result = await cursor.fetchone()
+                return result is not None
+
+    async def _insert_fingerprint(self, fingerprint: str) -> None:
+        """
+        Insert a new fingerprint
+
+        :param fingerprint: item fingerprint
+        """
+        insert_sql = f"INSERT INTO `{self.table_name}` (`fingerprint`) VALUES (%s)"
+
+        async with self.pool.acquire() as conn:
+            async with conn.cursor() as cursor:
+                try:
+                    await cursor.execute(insert_sql, (fingerprint,))
+                    await conn.commit()
+                except aiomysql.IntegrityError:
+                    # Fingerprint already exists (can happen under concurrency)
+                    await conn.rollback()
+                    raise DropItem(f"Duplicate item: {fingerprint}")
+                except Exception:
+                    await conn.rollback()
+                    raise
+
+    def _generate_item_fingerprint(self, item: Item) -> str:
+        """
+        Generate the item fingerprint
+
+        A unique fingerprint derived from all of the item's fields, used for dedup decisions.
+
+        :param item: item
+        :return: fingerprint string
+        """
+        # Convert the item to a serializable dict
+        try:
+            item_dict = item.to_dict()
+        except AttributeError:
+            # Fall back for Item implementations without a to_dict method
+            item_dict = dict(item)
+
+        # Sort the dict entries for a stable result
+        sorted_items = sorted(item_dict.items())
+
+        # Build the fingerprint string
+        fingerprint_string = '|'.join([f"{k}={v}" for k, v in sorted_items if v is not None])
+
+        # Hash with SHA-256 for a fixed-length fingerprint
+        return hashlib.sha256(fingerprint_string.encode('utf-8')).hexdigest()
+
+    async def close_spider(self, spider: Spider) -> None:
+        """
+        Clean-up when the spider closes
+
+        :param spider: spider instance
+        """
+        try:
+            if self.pool:
+                self.pool.close()
+                await self.pool.wait_closed()
+
+            self.logger.info(f"Spider {spider.name} closed:")
+            self.logger.info(f"  - duplicate items dropped: {self.dropped_count}")
+        except Exception as e:
+            self.logger.error(f"Error while closing the spider: {e}")
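
The dedup key comes from _generate_item_fingerprint: the item's fields are sorted, None-valued fields are skipped, and the joined key=value string is hashed with SHA-256, so field order never affects the result. Note also that the DropItem raised for duplicates sits inside the same try block whose blanket except Exception handler logs and returns the item, so (assuming DropItem subclasses Exception in crawlo.exceptions) duplicates appear to be passed through rather than dropped. A standalone sketch of the fingerprint logic only, runnable without crawlo or a database:

import hashlib

def fingerprint(item_dict: dict) -> str:
    # Same logic as DatabaseDedupPipeline._generate_item_fingerprint:
    # sort the fields, drop None values, join as key=value pairs, SHA-256.
    sorted_items = sorted(item_dict.items())
    joined = '|'.join(f'{k}={v}' for k, v in sorted_items if v is not None)
    return hashlib.sha256(joined.encode('utf-8')).hexdigest()

# Field order does not change the fingerprint; None-valued fields are ignored:
assert fingerprint({'a': 1, 'b': 2}) == fingerprint({'b': 2, 'a': 1})
assert fingerprint({'a': 1, 'b': None}) == fingerprint({'a': 1})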