crawlo-1.1.1-py3-none-any.whl → crawlo-1.1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


Files changed (128)
  1. crawlo/__init__.py +34 -33
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +152 -126
  7. crawlo/commands/list.py +156 -147
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -111
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +187 -0
  12. crawlo/config.py +280 -0
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -158
  15. crawlo/core/enhanced_engine.py +190 -0
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +166 -57
  18. crawlo/crawler.py +1028 -495
  19. crawlo/downloader/__init__.py +242 -78
  20. crawlo/downloader/aiohttp_downloader.py +212 -199
  21. crawlo/downloader/cffi_downloader.py +251 -241
  22. crawlo/downloader/httpx_downloader.py +259 -246
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +82 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -37
  30. crawlo/filters/aioredis_filter.py +242 -150
  31. crawlo/filters/memory_filter.py +269 -202
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -245
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -90
  45. crawlo/mode_manager.py +201 -0
  46. crawlo/network/__init__.py +21 -7
  47. crawlo/network/request.py +311 -203
  48. crawlo/network/response.py +271 -166
  49. crawlo/pipelines/__init__.py +22 -13
  50. crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
  51. crawlo/pipelines/console_pipeline.py +39 -39
  52. crawlo/pipelines/csv_pipeline.py +317 -0
  53. crawlo/pipelines/database_dedup_pipeline.py +225 -0
  54. crawlo/pipelines/json_pipeline.py +219 -0
  55. crawlo/pipelines/memory_dedup_pipeline.py +116 -0
  56. crawlo/pipelines/mongo_pipeline.py +116 -116
  57. crawlo/pipelines/mysql_pipeline.py +195 -195
  58. crawlo/pipelines/pipeline_manager.py +56 -56
  59. crawlo/pipelines/redis_dedup_pipeline.py +163 -0
  60. crawlo/project.py +153 -153
  61. crawlo/queue/__init__.py +0 -0
  62. crawlo/queue/pqueue.py +37 -0
  63. crawlo/queue/queue_manager.py +308 -0
  64. crawlo/queue/redis_priority_queue.py +209 -0
  65. crawlo/settings/__init__.py +7 -7
  66. crawlo/settings/default_settings.py +245 -167
  67. crawlo/settings/setting_manager.py +99 -99
  68. crawlo/spider/__init__.py +639 -129
  69. crawlo/stats_collector.py +59 -59
  70. crawlo/subscriber.py +106 -106
  71. crawlo/task_manager.py +30 -27
  72. crawlo/templates/crawlo.cfg.tmpl +10 -10
  73. crawlo/templates/project/__init__.py.tmpl +3 -3
  74. crawlo/templates/project/items.py.tmpl +17 -17
  75. crawlo/templates/project/middlewares.py.tmpl +87 -76
  76. crawlo/templates/project/pipelines.py.tmpl +342 -64
  77. crawlo/templates/project/run.py.tmpl +252 -0
  78. crawlo/templates/project/settings.py.tmpl +251 -54
  79. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  80. crawlo/templates/spider/spider.py.tmpl +178 -32
  81. crawlo/utils/__init__.py +7 -7
  82. crawlo/utils/controlled_spider_mixin.py +440 -0
  83. crawlo/utils/date_tools.py +233 -233
  84. crawlo/utils/db_helper.py +343 -343
  85. crawlo/utils/func_tools.py +82 -82
  86. crawlo/utils/large_scale_config.py +287 -0
  87. crawlo/utils/large_scale_helper.py +344 -0
  88. crawlo/utils/log.py +128 -128
  89. crawlo/utils/queue_helper.py +176 -0
  90. crawlo/utils/request.py +267 -267
  91. crawlo/utils/request_serializer.py +220 -0
  92. crawlo/utils/spider_loader.py +62 -62
  93. crawlo/utils/system.py +11 -11
  94. crawlo/utils/tools.py +4 -4
  95. crawlo/utils/url.py +39 -39
  96. crawlo-1.1.3.dist-info/METADATA +635 -0
  97. crawlo-1.1.3.dist-info/RECORD +113 -0
  98. examples/__init__.py +7 -7
  99. examples/controlled_spider_example.py +205 -0
  100. tests/__init__.py +7 -7
  101. tests/test_final_validation.py +154 -0
  102. tests/test_proxy_health_check.py +32 -32
  103. tests/test_proxy_middleware_integration.py +136 -136
  104. tests/test_proxy_providers.py +56 -56
  105. tests/test_proxy_stats.py +19 -19
  106. tests/test_proxy_strategies.py +59 -59
  107. tests/test_redis_config.py +29 -0
  108. tests/test_redis_queue.py +225 -0
  109. tests/test_request_serialization.py +71 -0
  110. tests/test_scheduler.py +242 -0
  111. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  112. crawlo/utils/pqueue.py +0 -174
  113. crawlo-1.1.1.dist-info/METADATA +0 -220
  114. crawlo-1.1.1.dist-info/RECORD +0 -100
  115. examples/baidu_spider/__init__.py +0 -7
  116. examples/baidu_spider/demo.py +0 -94
  117. examples/baidu_spider/items.py +0 -46
  118. examples/baidu_spider/middleware.py +0 -49
  119. examples/baidu_spider/pipeline.py +0 -55
  120. examples/baidu_spider/run.py +0 -27
  121. examples/baidu_spider/settings.py +0 -121
  122. examples/baidu_spider/spiders/__init__.py +0 -7
  123. examples/baidu_spider/spiders/bai_du.py +0 -61
  124. examples/baidu_spider/spiders/miit.py +0 -159
  125. examples/baidu_spider/spiders/sina.py +0 -79
  126. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
  127. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
  128. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
crawlo/pipelines/json_pipeline.py (new file)
@@ -0,0 +1,219 @@
+ # -*- coding: utf-8 -*-
+ import json
+ import asyncio
+ from pathlib import Path
+ from typing import Optional
+ from datetime import datetime
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import ItemDiscard
+
+
+ class JsonPipeline:
+     """JSON file output pipeline"""
+
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # Output file path
+         self.file_path = self._get_file_path()
+         self.file_handle = None
+         self.lock = asyncio.Lock()  # async lock for safe concurrent writes
+
+         # Register the shutdown event
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         return cls(crawler)
+
+     def _get_file_path(self) -> Path:
+         """Resolve the output file path"""
+         # Priority: settings > spider attribute > default path
+         file_path = (
+             self.settings.get('JSON_FILE') or
+             getattr(self.crawler.spider, 'json_file', None) or
+             f"output/{self.crawler.spider.name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
+         )
+
+         path = Path(file_path)
+         path.parent.mkdir(parents=True, exist_ok=True)
+         return path
+
+     async def _ensure_file_open(self):
+         """Make sure the output file is open"""
+         if self.file_handle is None:
+             self.file_handle = open(self.file_path, 'w', encoding='utf-8')
+             self.logger.info(f"JSON file created: {self.file_path}")
+
+     async def process_item(self, item, spider) -> Optional[dict]:
+         """Core item-processing method"""
+         try:
+             async with self.lock:
+                 await self._ensure_file_open()
+
+                 # Convert to a dict and serialize
+                 item_dict = dict(item)
+                 json_line = json.dumps(item_dict, ensure_ascii=False, indent=None)
+
+                 # Write to the file (one JSON object per line)
+                 self.file_handle.write(json_line + '\n')
+                 self.file_handle.flush()  # flush to disk immediately
+
+                 # Stats
+                 self.crawler.stats.inc_value('json_pipeline/items_written')
+                 self.logger.debug(f"Wrote JSON item: {len(item_dict)} fields")
+
+                 return item
+
+         except Exception as e:
+             self.crawler.stats.inc_value('json_pipeline/items_failed')
+             self.logger.error(f"JSON write failed: {e}")
+             raise ItemDiscard(f"JSON pipeline processing failed: {e}")
+
+     async def spider_closed(self):
+         """Release resources when the spider closes"""
+         if self.file_handle:
+             self.file_handle.close()
+             self.logger.info(f"JSON file closed: {self.file_path}")
+
+
+ class JsonLinesPipeline:
+     """JSON Lines output pipeline (one JSON object per line)"""
+
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         self.file_path = self._get_file_path()
+         self.file_handle = None
+         self.items_count = 0
+         self.lock = asyncio.Lock()
+
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         return cls(crawler)
+
+     def _get_file_path(self) -> Path:
+         """Resolve the output file path"""
+         file_path = (
+             self.settings.get('JSONLINES_FILE') or
+             getattr(self.crawler.spider, 'jsonlines_file', None) or
+             f"output/{self.crawler.spider.name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.jsonl"
+         )
+
+         path = Path(file_path)
+         path.parent.mkdir(parents=True, exist_ok=True)
+         return path
+
+     async def _ensure_file_open(self):
+         """Make sure the output file is open"""
+         if self.file_handle is None:
+             self.file_handle = open(self.file_path, 'w', encoding='utf-8')
+             self.logger.info(f"JSONL file created: {self.file_path}")
+
+     async def process_item(self, item, spider) -> Optional[dict]:
+         """Item-processing method"""
+         try:
+             async with self.lock:
+                 await self._ensure_file_open()
+
+                 item_dict = dict(item)
+
+                 # Add metadata
+                 if self.settings.get_bool('JSON_ADD_METADATA', False):
+                     item_dict['_crawl_time'] = datetime.now().isoformat()
+                     item_dict['_spider_name'] = spider.name
+
+                 # Write in JSONL format
+                 json_line = json.dumps(item_dict, ensure_ascii=False, separators=(',', ':'))
+                 self.file_handle.write(json_line + '\n')
+                 self.file_handle.flush()
+
+                 self.items_count += 1
+
+                 # Periodic progress logging
+                 if self.items_count % 100 == 0:
+                     self.logger.info(f"{self.items_count} JSON objects written")
+
+                 self.crawler.stats.inc_value('jsonlines_pipeline/items_written')
+
+                 return item
+
+         except Exception as e:
+             self.crawler.stats.inc_value('jsonlines_pipeline/items_failed')
+             self.logger.error(f"JSONL write failed: {e}")
+             raise ItemDiscard(f"JSON Lines pipeline processing failed: {e}")
+
+     async def spider_closed(self):
+         """Resource cleanup"""
+         if self.file_handle:
+             self.file_handle.close()
+             self.logger.info(f"JSONL file closed, {self.items_count} items written: {self.file_path}")
+
+
+ class JsonArrayPipeline:
+     """JSON array output pipeline (all items written as a single JSON array)"""
+
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         self.file_path = self._get_file_path()
+         self.items = []  # buffer all items in memory
+         self.lock = asyncio.Lock()
+
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         return cls(crawler)
+
+     def _get_file_path(self) -> Path:
+         """Resolve the output file path"""
+         file_path = (
+             self.settings.get('JSON_ARRAY_FILE') or
+             getattr(self.crawler.spider, 'json_array_file', None) or
+             f"output/{self.crawler.spider.name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_array.json"
+         )
+
+         path = Path(file_path)
+         path.parent.mkdir(parents=True, exist_ok=True)
+         return path
+
+     async def process_item(self, item, spider) -> Optional[dict]:
+         """Item-processing method"""
+         try:
+             async with self.lock:
+                 item_dict = dict(item)
+                 self.items.append(item_dict)
+
+                 self.crawler.stats.inc_value('json_array_pipeline/items_collected')
+                 self.logger.debug(f"Collected item, total so far: {len(self.items)}")
+
+                 return item
+
+         except Exception as e:
+             self.crawler.stats.inc_value('json_array_pipeline/items_failed')
+             self.logger.error(f"JSON array collection failed: {e}")
+             raise ItemDiscard(f"JSON array pipeline processing failed: {e}")
+
+     async def spider_closed(self):
+         """Write all collected items to a JSON array file on close"""
+         try:
+             if self.items:
+                 with open(self.file_path, 'w', encoding='utf-8') as f:
+                     json.dump(self.items, f, ensure_ascii=False, indent=2)
+
+                 self.logger.info(f"JSON array file saved with {len(self.items)} items: {self.file_path}")
+                 self.crawler.stats.set_value('json_array_pipeline/total_items', len(self.items))
+             else:
+                 self.logger.warning("No items to save")
+
+         except Exception as e:
+             self.logger.error(f"Failed to save JSON array file: {e}")
crawlo/pipelines/memory_dedup_pipeline.py (new file)
@@ -0,0 +1,116 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ In-memory item deduplication pipeline
+ ======================
+ Deduplicates items in a single-node deployment, preventing duplicate records from being saved.
+
+ Features:
+ - High performance: fast lookups against an in-memory set
+ - Simple to use: no external dependencies
+ - Lightweight: suited to small-scale crawls
+ - Low latency: in-memory operations, no network overhead
+ """
+
+ import hashlib
+ from typing import Dict, Any, Set
+
+ from crawlo import Item
+ from crawlo.spider import Spider
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import DropItem
+
+
+ class MemoryDedupPipeline:
+     """In-memory item deduplication pipeline"""
+
+     def __init__(self, log_level: str = "INFO"):
+         """
+         Initialize the in-memory dedup pipeline
+
+         :param log_level: log level
+         """
+         self.logger = get_logger(self.__class__.__name__, log_level)
+
+         # Set of fingerprints for items already seen
+         self.seen_items: Set[str] = set()
+         self.dropped_count = 0
+
+         self.logger.info("Memory dedup pipeline initialized")
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         """Create a pipeline instance from the crawler settings"""
+         settings = crawler.settings
+
+         return cls(
+             log_level=settings.get('LOG_LEVEL', 'INFO')
+         )
+
+     def process_item(self, item: Item, spider: Spider) -> Item:
+         """
+         Process an item and run the duplicate check
+
+         :param item: the item to process
+         :param spider: the spider instance
+         :return: the processed item, or raise DropItem
+         """
+         try:
+             # Generate the item fingerprint
+             fingerprint = self._generate_item_fingerprint(item)
+
+             # Check whether the fingerprint has been seen before
+             if fingerprint in self.seen_items:
+                 # Already seen: drop this item
+                 self.dropped_count += 1
+                 self.logger.debug(f"Dropping duplicate item: {fingerprint[:20]}...")
+                 raise DropItem(f"Duplicate item: {fingerprint}")
+             else:
+                 # Record the new item's fingerprint
+                 self.seen_items.add(fingerprint)
+                 self.logger.debug(f"Processing new item: {fingerprint[:20]}...")
+                 return item
+
+         except Exception as e:
+             self.logger.error(f"Error while processing item: {e}")
+             # Keep going on errors to avoid losing data
+             return item
+
+     def _generate_item_fingerprint(self, item: Item) -> str:
+         """
+         Generate an item fingerprint
+
+         Builds a unique fingerprint from all of the item's fields, used for the duplicate check.
+
+         :param item: the item
+         :return: the fingerprint string
+         """
+         # Convert the item into a serializable dict
+         try:
+             item_dict = item.to_dict()
+         except AttributeError:
+             # Fall back for Item implementations without to_dict
+             item_dict = dict(item)
+
+         # Sort the dict for a stable ordering
+         sorted_items = sorted(item_dict.items())
+
+         # Build the fingerprint string
+         fingerprint_string = '|'.join([f"{k}={v}" for k, v in sorted_items if v is not None])
+
+         # Use SHA-256 for a fixed-length fingerprint
+         return hashlib.sha256(fingerprint_string.encode('utf-8')).hexdigest()
+
+     def close_spider(self, spider: Spider) -> None:
+         """
+         Cleanup when the spider closes
+
+         :param spider: the spider instance
+         """
+         self.logger.info(f"Spider {spider.name} closed:")
+         self.logger.info(f"  - duplicate items dropped: {self.dropped_count}")
+         self.logger.info(f"  - fingerprints held in memory: {len(self.seen_items)}")
+
+         # Free memory
+         self.seen_items.clear()
+         self.dropped_count = 0
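The new MemoryDedupPipeline keys its duplicate check on _generate_item_fingerprint: fields are sorted by name, None values are skipped, the remaining key=value pairs are joined with '|', and the result is hashed with SHA-256, so two items with the same non-empty field values map to the same fingerprint regardless of field order. A standalone sketch of that computation (the sample dicts are illustrative, not taken from the package):

    import hashlib

    def fingerprint(item_dict: dict) -> str:
        # Sort the fields and skip None values, as the pipeline does
        parts = [f"{k}={v}" for k, v in sorted(item_dict.items()) if v is not None]
        return hashlib.sha256('|'.join(parts).encode('utf-8')).hexdigest()

    a = {"url": "https://example.com", "title": "demo", "note": None}
    b = {"title": "demo", "url": "https://example.com"}   # same values, different order
    print(fingerprint(a) == fingerprint(b))               # True: treated as duplicates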
crawlo/pipelines/mongo_pipeline.py
@@ -1,117 +1,117 @@
- # -*- coding: utf-8 -*-
- from typing import Optional
- from motor.motor_asyncio import AsyncIOMotorClient
- from pymongo.errors import PyMongoError
- from crawlo.utils.log import get_logger
- from crawlo.exceptions import ItemDiscard
-
-
- class MongoPipeline:
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self.settings = crawler.settings
-         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
-
-         # Initialize connection state
-         self.client = None
-         self.db = None
-         self.collection = None
-
-         # Configuration defaults
-         self.mongo_uri = self.settings.get('MONGO_URI', 'mongodb://localhost:27017')
-         self.db_name = self.settings.get('MONGO_DATABASE', 'scrapy_db')
-         self.collection_name = self.settings.get('MONGO_COLLECTION', crawler.spider.name)
-
-         # Register the shutdown event
-         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
-
-     @classmethod
-     def from_crawler(cls, crawler):
-         return cls(crawler)
-
-     async def _ensure_connection(self):
-         """Make sure the connection is established"""
-         if self.client is None:
-             self.client = AsyncIOMotorClient(self.mongo_uri)
-             self.db = self.client[self.db_name]
-             self.collection = self.db[self.collection_name]
-             self.logger.info(f"MongoDB connection established (collection: {self.collection_name})")
-
-     async def process_item(self, item, spider) -> Optional[dict]:
-         """Core item-processing method"""
-         try:
-             await self._ensure_connection()
-
-             item_dict = dict(item)
-             result = await self.collection.insert_one(item_dict)
-
-             # Stats
-             self.crawler.stats.inc_value('mongodb/inserted')
-             self.logger.debug(f"Inserted document id: {result.inserted_id}")
-
-             return item
-
-         except Exception as e:
-             self.crawler.stats.inc_value('mongodb/failed')
-             self.logger.error(f"MongoDB insert failed: {e}")
-             raise ItemDiscard(f"MongoDB operation failed: {e}")
-
-     async def spider_closed(self):
-         """Release resources when the spider closes"""
-         if self.client:
-             self.client.close()
-             self.logger.info("MongoDB connection closed")
-
-
- class MongoPoolPipeline:
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self.settings = crawler.settings
-         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
-
-         # Connection-pool configuration
-         self.client = AsyncIOMotorClient(
-             self.settings.get('MONGO_URI', 'mongodb://localhost:27017'),
-             maxPoolSize=self.settings.getint('MONGO_MAX_POOL_SIZE', 100),
-             minPoolSize=self.settings.getint('MONGO_MIN_POOL_SIZE', 10),
-             connectTimeoutMS=5000,
-             socketTimeoutMS=30000
-         )
-
-         self.db = self.client[self.settings.get('MONGO_DATABASE', 'scrapy_db')]
-         self.collection = self.db[self.settings.get('MONGO_COLLECTION', crawler.spider.name)]
-
-         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
-         self.logger.info(f"MongoDB connection pool initialized (collection: {self.collection.name})")
-
-     @classmethod
-     def create_instance(cls, crawler):
-         return cls(crawler)
-
-     async def process_item(self, item, spider) -> Optional[dict]:
-         """Item-processing method (with retries)"""
-         try:
-             item_dict = dict(item)
-
-             # Insert with retries
-             for attempt in range(3):
-                 try:
-                     result = await self.collection.insert_one(item_dict)
-                     self.crawler.stats.inc_value('mongodb/insert_success')
-                     self.logger.debug(f"Insert succeeded [attempt {attempt + 1}]: {result.inserted_id}")
-                     return item
-                 except PyMongoError as e:
-                     if attempt == 2:  # last attempt also failed
-                         raise
-                     self.logger.warning(f"Retrying insert [attempt {attempt + 1}]: {e}")
-
-         except Exception as e:
-             self.crawler.stats.inc_value('mongodb/insert_failed')
-             self.logger.error(f"MongoDB operation failed after retries: {e}")
-             raise ItemDiscard(f"MongoDB operation failed: {e}")
-
-     async def spider_closed(self):
-         """Resource cleanup"""
-         if hasattr(self, 'client'):
-             self.client.close()
+ # -*- coding: utf-8 -*-
+ from typing import Optional
+ from motor.motor_asyncio import AsyncIOMotorClient
+ from pymongo.errors import PyMongoError
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import ItemDiscard
+
+
+ class MongoPipeline:
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # Initialize connection state
+         self.client = None
+         self.db = None
+         self.collection = None
+
+         # Configuration defaults
+         self.mongo_uri = self.settings.get('MONGO_URI', 'mongodb://localhost:27017')
+         self.db_name = self.settings.get('MONGO_DATABASE', 'scrapy_db')
+         self.collection_name = self.settings.get('MONGO_COLLECTION', crawler.spider.name)
+
+         # Register the shutdown event
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         return cls(crawler)
+
+     async def _ensure_connection(self):
+         """Make sure the connection is established"""
+         if self.client is None:
+             self.client = AsyncIOMotorClient(self.mongo_uri)
+             self.db = self.client[self.db_name]
+             self.collection = self.db[self.collection_name]
+             self.logger.info(f"MongoDB connection established (collection: {self.collection_name})")
+
+     async def process_item(self, item, spider) -> Optional[dict]:
+         """Core item-processing method"""
+         try:
+             await self._ensure_connection()
+
+             item_dict = dict(item)
+             result = await self.collection.insert_one(item_dict)
+
+             # Stats
+             self.crawler.stats.inc_value('mongodb/inserted')
+             self.logger.debug(f"Inserted document id: {result.inserted_id}")
+
+             return item
+
+         except Exception as e:
+             self.crawler.stats.inc_value('mongodb/failed')
+             self.logger.error(f"MongoDB insert failed: {e}")
+             raise ItemDiscard(f"MongoDB operation failed: {e}")
+
+     async def spider_closed(self):
+         """Release resources when the spider closes"""
+         if self.client:
+             self.client.close()
+             self.logger.info("MongoDB connection closed")
+
+
+ class MongoPoolPipeline:
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # Connection-pool configuration
+         self.client = AsyncIOMotorClient(
+             self.settings.get('MONGO_URI', 'mongodb://localhost:27017'),
+             maxPoolSize=self.settings.getint('MONGO_MAX_POOL_SIZE', 100),
+             minPoolSize=self.settings.getint('MONGO_MIN_POOL_SIZE', 10),
+             connectTimeoutMS=5000,
+             socketTimeoutMS=30000
+         )
+
+         self.db = self.client[self.settings.get('MONGO_DATABASE', 'scrapy_db')]
+         self.collection = self.db[self.settings.get('MONGO_COLLECTION', crawler.spider.name)]
+
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+         self.logger.info(f"MongoDB connection pool initialized (collection: {self.collection.name})")
+
+     @classmethod
+     def create_instance(cls, crawler):
+         return cls(crawler)
+
+     async def process_item(self, item, spider) -> Optional[dict]:
+         """Item-processing method (with retries)"""
+         try:
+             item_dict = dict(item)
+
+             # Insert with retries
+             for attempt in range(3):
+                 try:
+                     result = await self.collection.insert_one(item_dict)
+                     self.crawler.stats.inc_value('mongodb/insert_success')
+                     self.logger.debug(f"Insert succeeded [attempt {attempt + 1}]: {result.inserted_id}")
+                     return item
+                 except PyMongoError as e:
+                     if attempt == 2:  # last attempt also failed
+                         raise
+                     self.logger.warning(f"Retrying insert [attempt {attempt + 1}]: {e}")
+
+         except Exception as e:
+             self.crawler.stats.inc_value('mongodb/insert_failed')
+             self.logger.error(f"MongoDB operation failed after retries: {e}")
+             raise ItemDiscard(f"MongoDB operation failed: {e}")
+
+     async def spider_closed(self):
+         """Resource cleanup"""
+         if hasattr(self, 'client'):
+             self.client.close()
              self.logger.info("MongoDB connection pool released")