crawlo 1.1.1-py3-none-any.whl → 1.1.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crawlo might be problematic.
- crawlo/__init__.py +2 -1
- crawlo/__version__.py +1 -1
- crawlo/commands/genspider.py +68 -42
- crawlo/commands/list.py +102 -93
- crawlo/commands/startproject.py +89 -4
- crawlo/commands/utils.py +187 -0
- crawlo/config.py +280 -0
- crawlo/core/engine.py +16 -3
- crawlo/core/enhanced_engine.py +190 -0
- crawlo/core/scheduler.py +113 -8
- crawlo/crawler.py +840 -307
- crawlo/downloader/__init__.py +181 -17
- crawlo/downloader/aiohttp_downloader.py +15 -2
- crawlo/downloader/cffi_downloader.py +11 -1
- crawlo/downloader/httpx_downloader.py +14 -3
- crawlo/filters/__init__.py +122 -5
- crawlo/filters/aioredis_filter.py +128 -36
- crawlo/filters/memory_filter.py +99 -32
- crawlo/middleware/proxy.py +11 -8
- crawlo/middleware/retry.py +40 -5
- crawlo/mode_manager.py +201 -0
- crawlo/network/__init__.py +17 -3
- crawlo/network/request.py +118 -10
- crawlo/network/response.py +131 -28
- crawlo/pipelines/__init__.py +1 -1
- crawlo/pipelines/csv_pipeline.py +317 -0
- crawlo/pipelines/json_pipeline.py +219 -0
- crawlo/queue/__init__.py +0 -0
- crawlo/queue/pqueue.py +37 -0
- crawlo/queue/queue_manager.py +304 -0
- crawlo/queue/redis_priority_queue.py +192 -0
- crawlo/settings/default_settings.py +68 -9
- crawlo/spider/__init__.py +576 -66
- crawlo/task_manager.py +4 -1
- crawlo/templates/project/middlewares.py.tmpl +56 -45
- crawlo/templates/project/pipelines.py.tmpl +308 -36
- crawlo/templates/project/run.py.tmpl +239 -0
- crawlo/templates/project/settings.py.tmpl +211 -17
- crawlo/templates/spider/spider.py.tmpl +153 -7
- crawlo/utils/controlled_spider_mixin.py +336 -0
- crawlo/utils/large_scale_config.py +287 -0
- crawlo/utils/large_scale_helper.py +344 -0
- crawlo/utils/queue_helper.py +176 -0
- crawlo/utils/request_serializer.py +220 -0
- crawlo-1.1.2.dist-info/METADATA +567 -0
- {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/RECORD +54 -46
- tests/test_final_validation.py +154 -0
- tests/test_redis_config.py +29 -0
- tests/test_redis_queue.py +225 -0
- tests/test_request_serialization.py +71 -0
- tests/test_scheduler.py +242 -0
- crawlo/pipelines/mysql_batch_pipline.py +0 -273
- crawlo/utils/pqueue.py +0 -174
- crawlo-1.1.1.dist-info/METADATA +0 -220
- examples/baidu_spider/__init__.py +0 -7
- examples/baidu_spider/demo.py +0 -94
- examples/baidu_spider/items.py +0 -46
- examples/baidu_spider/middleware.py +0 -49
- examples/baidu_spider/pipeline.py +0 -55
- examples/baidu_spider/run.py +0 -27
- examples/baidu_spider/settings.py +0 -121
- examples/baidu_spider/spiders/__init__.py +0 -7
- examples/baidu_spider/spiders/bai_du.py +0 -61
- examples/baidu_spider/spiders/miit.py +0 -159
- examples/baidu_spider/spiders/sina.py +0 -79
- {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/WHEEL +0 -0
- {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/top_level.txt +0 -0
tests/test_scheduler.py
ADDED
@@ -0,0 +1,242 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tests for the distributed queue behaviour of the fixed Scheduler.
"""
import asyncio
import sys
from unittest.mock import Mock
from crawlo.core.scheduler import Scheduler
from crawlo.network.request import Request
from crawlo.utils.log import get_logger


class MockCrawler:
    """Mock Crawler object."""
    def __init__(self, use_redis=True):
        self.settings = MockSettings(use_redis)
        self.stats = Mock()


class MockSettings:
    """Mock Settings object."""
    def __init__(self, use_redis=True):
        self.use_redis = use_redis

    def get(self, key, default=None):
        config = {
            'FILTER_CLASS': 'crawlo.filters.memory_filter.MemoryFilter',
            'LOG_LEVEL': 'INFO',
            'DEPTH_PRIORITY': 1,
            'SCHEDULER_MAX_QUEUE_SIZE': 100,
            'SCHEDULER_QUEUE_NAME': 'test:crawlo:requests',
            'FILTER_DEBUG': False,
            'PROJECT_NAME': 'test',
        }
        if self.use_redis:
            config['REDIS_URL'] = 'redis://localhost:6379/0'

        return config.get(key, default)

    def get_int(self, key, default=0):
        value = self.get(key, default)
        return int(value) if value is not None else default

    def get_bool(self, key, default=False):
        value = self.get(key, default)
        if isinstance(value, bool):
            return value
        if isinstance(value, str):
            return value.lower() in ('true', '1', 'yes')
        return bool(value) if value is not None else default


class MockFilter:
    """Mock deduplication filter."""
    def __init__(self):
        self.seen = set()

    @classmethod
    def create_instance(cls, crawler):
        return cls()

    async def requested(self, request):
        if request.url in self.seen:
            return True
        self.seen.add(request.url)
        return False

    def log_stats(self, request):
        pass


async def test_memory_scheduler():
    """Test the in-memory scheduler."""
    print("🔍 Testing the in-memory scheduler...")

    crawler = MockCrawler(use_redis=False)
    scheduler = Scheduler.create_instance(crawler)

    # Stub out the deduplication filter
    scheduler.dupe_filter = MockFilter()

    scheduler.open()

    # Test enqueueing
    request1 = Request(url="https://example1.com")
    request2 = Request(url="https://example2.com")

    success1 = await scheduler.enqueue_request(request1)
    success2 = await scheduler.enqueue_request(request2)

    print(f"  📤 Enqueue results: {success1}, {success2}")
    print(f"  📊 Queue size: {len(scheduler)}")

    # Test dequeueing
    req1 = await scheduler.next_request()
    req2 = await scheduler.next_request()

    print(f"  📥 Dequeue results: {req1.url if req1 else None}, {req2.url if req2 else None}")
    print(f"  📊 Remaining size: {len(scheduler)}")

    await scheduler.close()
    print("  ✅ In-memory scheduler test finished")


async def test_redis_scheduler():
    """Test the Redis scheduler."""
    print("🔍 Testing the Redis scheduler...")

    try:
        crawler = MockCrawler(use_redis=True)
        scheduler = Scheduler.create_instance(crawler)

        # Stub out the deduplication filter
        scheduler.dupe_filter = MockFilter()

        scheduler.open()

        # Test enqueueing
        request1 = Request(url="https://redis-test1.com", priority=5)
        request2 = Request(url="https://redis-test2.com", priority=3)
        request3 = Request(url="https://redis-test3.com", priority=8)

        success1 = await scheduler.enqueue_request(request1)
        success2 = await scheduler.enqueue_request(request2)
        success3 = await scheduler.enqueue_request(request3)

        print(f"  📤 Enqueue results: {success1}, {success2}, {success3}")
        print(f"  📊 Queue size: {len(scheduler)}")

        # Give the Redis operations a moment to complete
        await asyncio.sleep(0.5)

        # Test dequeueing (should come back in priority order)
        req1 = await scheduler.next_request()
        req2 = await scheduler.next_request()
        req3 = await scheduler.next_request()

        print("  📥 Dequeue results (by priority):")
        if req1:
            print(f"    {req1.url} (priority: {getattr(req1, 'priority', 0)})")
        if req2:
            print(f"    {req2.url} (priority: {getattr(req2, 'priority', 0)})")
        if req3:
            print(f"    {req3.url} (priority: {getattr(req3, 'priority', 0)})")

        print(f"  📊 Remaining size: {len(scheduler)}")

        await scheduler.close()
        print("  ✅ Redis scheduler test finished")

    except Exception as e:
        print(f"  ❌ Redis scheduler test failed: {e}")
        import traceback
        traceback.print_exc()


async def test_concurrent_redis():
    """Test concurrent Redis operations."""
    print("🔍 Testing concurrent Redis operations...")

    async def producer(scheduler, name, count):
        """Producer."""
        for i in range(count):
            request = Request(url=f"https://{name}-{i}.com", priority=i % 10)
            await scheduler.enqueue_request(request)
            await asyncio.sleep(0.01)
        print(f"  ✅ Producer {name} finished ({count} requests)")

    async def consumer(scheduler, name, count):
        """Consumer."""
        consumed = 0
        for _ in range(count):
            request = await scheduler.next_request()
            if request:
                consumed += 1
                await asyncio.sleep(0.005)
            else:
                break
        print(f"  ✅ Consumer {name} processed {consumed} requests")

    try:
        crawler = MockCrawler(use_redis=True)
        scheduler = Scheduler.create_instance(crawler)
        scheduler.dupe_filter = MockFilter()
        scheduler.open()

        # Run producers and consumers concurrently
        tasks = [
            producer(scheduler, "producer-1", 5),
            producer(scheduler, "producer-2", 5),
            consumer(scheduler, "consumer-1", 3),
            consumer(scheduler, "consumer-2", 3),
            consumer(scheduler, "consumer-3", 4),
        ]

        await asyncio.gather(*tasks, return_exceptions=True)

        print(f"  📊 Final queue size: {len(scheduler)}")

        await scheduler.close()
        print("  ✅ Concurrency test finished")

    except Exception as e:
        print(f"  ❌ Concurrency test failed: {e}")
        import traceback
        traceback.print_exc()


async def main():
    """Main test entry point."""
    print("🚀 Starting tests for the fixed Scheduler...")
    print("=" * 50)

    try:
        # 1. Test the in-memory scheduler
        await test_memory_scheduler()
        print()

        # 2. Test the Redis scheduler
        await test_redis_scheduler()
        print()

        # 3. Test concurrent operations
        await test_concurrent_redis()

        print("=" * 50)
        print("🎉 All Scheduler tests finished!")

    except Exception as e:
        print("=" * 50)
        print(f"❌ Tests failed: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    # Raise the log level to keep the output manageable
    import logging
    logging.getLogger('crawlo').setLevel(logging.WARNING)

    asyncio.run(main())
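Note: the two test paths above differ only in whether MockSettings exposes REDIS_URL. As a minimal sketch of the selection behaviour the test implies (a hypothetical helper, not the actual crawlo.core.scheduler code, which is not shown in this section):

# Hypothetical sketch, not crawlo source: the memory-vs-Redis switch the tests above imply.
# MockCrawler(use_redis=True) injects REDIS_URL into settings; use_redis=False omits it.
def choose_queue_backend(settings) -> str:
    """Return the queue backend a scheduler built from these settings would use."""
    return "redis" if settings.get("REDIS_URL") else "memory"

# With the MockSettings class defined in the test file above:
#   choose_queue_backend(MockSettings(use_redis=True))   -> "redis"
#   choose_queue_backend(MockSettings(use_redis=False))  -> "memory"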
crawlo/pipelines/mysql_batch_pipline.py
DELETED
@@ -1,273 +0,0 @@
# -*- coding: utf-8 -*-
import asyncio
import aiomysql
from typing import Optional, List, Dict
from asyncmy import create_pool
from crawlo.utils.log import get_logger
from crawlo.exceptions import ItemDiscard
from crawlo.utils.tools import make_insert_sql, logger


class AsyncmyMySQLPipeline:
    def __init__(self, crawler):
        self.crawler = crawler
        self.settings = crawler.settings
        self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))

        # Configuration
        self.table_name = (
            self.settings.get('MYSQL_TABLE') or
            getattr(crawler.spider, 'mysql_table', None) or
            f"{crawler.spider.name}_items"
        )
        self.batch_size = self.settings.getint('MYSQL_BATCH_SIZE', 100)
        self.flush_interval = self.settings.getfloat('MYSQL_FLUSH_INTERVAL', 3.0)  # seconds

        # Connection pool state
        self._pool_lock = asyncio.Lock()
        self._pool_initialized = False
        self.pool = None

        # Buffer and its lock
        self.items_buffer: List[Dict] = []
        self.buffer_lock = asyncio.Lock()

        # Background task
        self.flush_task: Optional[asyncio.Task] = None

        # Register the shutdown event
        crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    async def _ensure_pool(self):
        """Make sure the connection pool is initialised (thread-safe)."""
        if self._pool_initialized:
            return

        async with self._pool_lock:
            if not self._pool_initialized:
                try:
                    self.pool = await create_pool(
                        host=self.settings.get('MYSQL_HOST', 'localhost'),
                        port=self.settings.get_int('MYSQL_PORT', 3306),
                        user=self.settings.get('MYSQL_USER', 'root'),
                        password=self.settings.get('MYSQL_PASSWORD', ''),
                        db=self.settings.get('MYSQL_DB', 'scrapy_db'),
                        minsize=self.settings.get_int('MYSQL_POOL_MIN', 3),
                        maxsize=self.settings.get_int('MYSQL_POOL_MAX', 10),
                        echo=self.settings.get_bool('MYSQL_ECHO', False)
                    )
                    self._pool_initialized = True
                    self.logger.debug(f"MySQL connection pool initialised (table: {self.table_name})")
                except Exception as e:
                    self.logger.error(f"Failed to initialise the MySQL connection pool: {e}")
                    raise

    async def open_spider(self, spider):
        """Start the background flush task when the spider opens."""
        await self._ensure_pool()
        self.flush_task = asyncio.create_task(self._flush_loop())

    async def _flush_loop(self):
        """Background loop: periodically check whether the buffer needs flushing."""
        while True:
            await asyncio.sleep(self.flush_interval)
            if len(self.items_buffer) > 0:
                await self._flush_buffer()

    async def _flush_buffer(self):
        """Write the buffered items to the database in one batch."""
        async with self.buffer_lock:
            if not self.items_buffer:
                return

            items_to_insert = self.items_buffer.copy()
            self.items_buffer.clear()

        try:
            await self._ensure_pool()
            first_item = items_to_insert[0]
            sql = make_insert_sql(table=self.table_name, data=first_item, many=True)

            values = [list(item.values()) for item in items_to_insert]

            async with self.pool.acquire() as conn:
                async with conn.cursor() as cursor:
                    affected_rows = await cursor.executemany(sql, values)
                    await conn.commit()

            spider_name = getattr(self.crawler.spider, 'name', 'unknown')
            self.logger.info(f"Batch-inserted {affected_rows} rows into {self.table_name}")
            self.crawler.stats.inc_value('mysql/insert_success_batch', len(items_to_insert))

        except Exception as e:
            self.logger.error(f"Batch insert failed: {e}")
            self.crawler.stats.inc_value('mysql/insert_failed_batch', len(items_to_insert))
            # Optional: retry or discard
            raise ItemDiscard(f"Batch insert failed: {e}")

    async def process_item(self, item, spider, kwargs=None) -> dict:
        """Add the item to the buffer and trigger a batch insert when full."""
        item_dict = dict(item)

        async with self.buffer_lock:
            self.items_buffer.append(item_dict)
            if len(self.items_buffer) >= self.batch_size:
                # Batch threshold reached, flush immediately
                await self._flush_buffer()

        return item

    async def spider_closed(self):
        """Make sure all remaining data is written when the spider closes."""
        if self.flush_task:
            self.flush_task.cancel()
            try:
                await self.flush_task
            except asyncio.CancelledError:
                pass

        # Flush the final batch
        if self.items_buffer:
            await self._flush_buffer()

        # Close the connection pool
        if self.pool:
            self.pool.close()
            await self.pool.wait_closed()
            self.logger.info("MySQL connection pool closed")


class AiomysqlMySQLPipeline:
    def __init__(self, crawler):
        self.crawler = crawler
        self.settings = crawler.settings
        self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))

        # Configuration
        self.table_name = (
            self.settings.get('MYSQL_TABLE') or
            getattr(crawler.spider, 'mysql_table', None) or
            f"{crawler.spider.name}_items"
        )
        self.batch_size = self.settings.getint('MYSQL_BATCH_SIZE', 100)
        self.flush_interval = self.settings.getfloat('MYSQL_FLUSH_INTERVAL', 3.0)

        # Connection pool
        self._pool_lock = asyncio.Lock()
        self._pool_initialized = False
        self.pool = None

        # Buffer
        self.items_buffer: List[Dict] = []
        self.buffer_lock = asyncio.Lock()

        # Background task
        self.flush_task: Optional[asyncio.Task] = None

        crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')

    @classmethod
    def create_instance(cls, crawler):
        return cls(crawler)

    async def _init_pool(self):
        """Lazily initialise the connection pool (thread-safe)."""
        if self._pool_initialized:
            return

        async with self._pool_lock:
            if not self._pool_initialized:
                try:
                    self.pool = await aiomysql.create_pool(
                        host=self.settings.get('MYSQL_HOST', 'localhost'),
                        port=self.settings.getint('MYSQL_PORT', 3306),
                        user=self.settings.get('MYSQL_USER', 'root'),
                        password=self.settings.get('MYSQL_PASSWORD', ''),
                        db=self.settings.get('MYSQL_DB', 'scrapy_db'),
                        minsize=self.settings.getint('MYSQL_POOL_MIN', 3),
                        maxsize=self.settings.getint('MYSQL_POOL_MAX', 10),
                        cursorclass=aiomysql.DictCursor,
                        autocommit=False
                    )
                    self._pool_initialized = True
                    self.logger.debug(f"aiomysql connection pool initialised (table: {self.table_name})")
                except Exception as e:
                    self.logger.error(f"Failed to initialise the aiomysql connection pool: {e}")
                    raise

    async def open_spider(self, spider):
        """Create the background flush task when the spider opens."""
        await self._init_pool()
        self.flush_task = asyncio.create_task(self._flush_loop())

    async def _flush_loop(self):
        """Flush the buffer on a fixed interval."""
        while True:
            await asyncio.sleep(self.flush_interval)
            if len(self.items_buffer) > 0:
                await self._flush_buffer()

    async def _flush_buffer(self):
        """Execute the batch insert."""
        async with self.buffer_lock:
            if not self.items_buffer:
                return
            items_to_insert = self.items_buffer.copy()
            self.items_buffer.clear()

        try:
            await self._init_pool()
            keys = items_to_insert[0].keys()
            placeholders = ', '.join(['%s'] * len(keys))
            columns = ', '.join([f'`{k}`' for k in keys])
            sql = f"INSERT INTO `{self.table_name}` ({columns}) VALUES ({placeholders})"

            values = [list(item.values()) for item in items_to_insert]

            async with self.pool.acquire() as conn:
                async with conn.cursor() as cursor:
                    result = await cursor.executemany(sql, values)
                    await conn.commit()

            spider_name = getattr(self.crawler.spider, 'name', 'unknown')
            self.logger.info(f"[{spider_name}] batch-inserted {result} rows into {self.table_name}")
            self.crawler.stats.inc_value('mysql/insert_success_batch', len(items_to_insert))

        except aiomysql.Error as e:
            self.logger.error(f"aiomysql batch insert failed: {e}")
            self.crawler.stats.inc_value('mysql/insert_failed_batch', len(items_to_insert))
            raise ItemDiscard(f"MySQL error: {e.args[1]}")
        except Exception as e:
            self.logger.error(f"Unexpected error: {e}")
            raise ItemDiscard(f"Processing failed: {e}")

    async def process_item(self, item, spider) -> dict:
        item_dict = dict(item)

        async with self.buffer_lock:
            self.items_buffer.append(item_dict)
            if len(self.items_buffer) >= self.batch_size:
                await self._flush_buffer()

        return item

    async def spider_closed(self):
        """Clean up resources and commit any remaining data."""
        if self.flush_task:
            self.flush_task.cancel()
            try:
                await self.flush_task
            except asyncio.CancelledError:
                pass

        if self.items_buffer:
            await self._flush_buffer()

        if self.pool:
            self.pool.close()
            await self.pool.wait_closed()
            self.logger.info("aiomysql connection pool released")
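For reference, the configuration surface shared by the two deleted pipelines can be read off the code above; a sketch of the settings keys and the fallback values they used:

# Settings read by the deleted MySQL batch pipelines, with the defaults they fell back to.
MYSQL_PIPELINE_DEFAULTS = {
    "MYSQL_HOST": "localhost",
    "MYSQL_PORT": 3306,
    "MYSQL_USER": "root",
    "MYSQL_PASSWORD": "",
    "MYSQL_DB": "scrapy_db",
    "MYSQL_POOL_MIN": 3,
    "MYSQL_POOL_MAX": 10,
    "MYSQL_ECHO": False,          # asyncmy variant only
    "MYSQL_TABLE": None,          # falls back to spider.mysql_table, then f"{spider.name}_items"
    "MYSQL_BATCH_SIZE": 100,      # items buffered before an immediate flush
    "MYSQL_FLUSH_INTERVAL": 3.0,  # seconds between background flushes
}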
crawlo/utils/pqueue.py
DELETED
@@ -1,174 +0,0 @@
# -*- coding:UTF-8 -*-
import sys
import asyncio
import warnings
from urllib.parse import urlparse
from asyncio import PriorityQueue
from redis.asyncio import from_url
from typing import Any, Optional, Dict, Annotated
from pydantic import (
    BaseModel,
    Field,
    model_validator
)

from crawlo import Request
from crawlo.settings.default_settings import REDIS_URL


class SpiderPriorityQueue(PriorityQueue):
    """Async priority queue with a get() timeout."""

    def __init__(self, maxsize: int = 0) -> None:
        """Initialise the queue; maxsize=0 means unbounded."""
        super().__init__(maxsize)

    async def get(self, timeout: float = 0.1) -> Optional[Request]:
        """
        Get an element from the queue asynchronously, with a timeout.

        Args:
            timeout: timeout in seconds, 0.1 by default

        Returns:
            The queue element (priority, value), or None on timeout
        """
        try:
            # Pick the timeout implementation based on the Python version
            if sys.version_info >= (3, 11):
                async with asyncio.timeout(timeout):
                    return await super().get()
            else:
                return await asyncio.wait_for(super().get(), timeout=timeout)
        except asyncio.TimeoutError:
            return None


class TaskModel(BaseModel):
    """Crawl task data model (fully Pydantic V2 compatible)."""
    url: Annotated[str, Field(min_length=1, max_length=2000, examples=["https://example.com"])]
    meta: Dict[str, Any] = Field(default_factory=dict)
    priority: Annotated[int, Field(default=0, ge=0, le=10, description="0 = highest priority")]

    @classmethod
    def validate_url(cls, v: str) -> str:
        """Validate the URL format."""
        if not v.startswith(('http://', 'https://')):
            raise ValueError('URL must start with http:// or https://')

        parsed = urlparse(v)
        if not parsed.netloc:
            raise ValueError('URL is missing a valid domain')

        return v.strip()

    @model_validator(mode='after')
    def validate_priority_logic(self) -> 'TaskModel':
        """Cross-field validation example."""
        if 'admin' in self.url and self.priority > 5:
            self.priority = 5  # Automatically lower the priority of admin pages
        return self


class DistributedPriorityQueue:
    def __init__(
        self,
        redis_url: str,
        queue_name: str = "spider_queue",
        max_connections: int = 10,
        health_check_interval: int = 30
    ):
        """
        Args:
            redis_url: redis://[:password]@host:port[/db]
            queue_name: name of the Redis sorted-set key
            max_connections: connection pool size
            health_check_interval: connection health-check interval in seconds
        """
        self.redis = from_url(
            redis_url,
            max_connections=max_connections,
            health_check_interval=health_check_interval,
            socket_keepalive=True,
            decode_responses=True
        )
        self.queue_name = queue_name

    async def put(self, task: TaskModel) -> bool:
        """
        Add a task to the queue (using Pydantic V2's model_dump_json).

        Args:
            task: a validated TaskModel instance

        Returns:
            bool: whether the task was added (Redis ZADD returns the number added)
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=DeprecationWarning)
            task_str = task.model_dump_json()  # The correct V2 serialisation method
        return await self.redis.zadd(
            self.queue_name,
            {task_str: task.priority}
        ) > 0

    async def get(self, timeout: float = 1.0) -> Optional[TaskModel]:
        """
        Get the highest-priority task (validated automatically).

        Args:
            timeout: blocking timeout in seconds

        Returns:
            A TaskModel instance, or None (timeout / empty queue)
        """
        try:
            result = await self.redis.bzpopmax(
                self.queue_name,
                timeout=timeout
            )
            if result:
                _, task_str, _ = result
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", category=DeprecationWarning)
                    return TaskModel.model_validate_json(task_str)  # The correct V2 deserialisation method
        except Exception as e:
            print(f"Failed to fetch task: {type(e).__name__}: {e}")
            return None

    async def aclose(self):
        """Close the connection safely."""
        await self.redis.aclose()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.aclose()


# Usage example
async def demo():
    async with DistributedPriorityQueue(
        REDIS_URL,
        max_connections=20,
        health_check_interval=10
    ) as queue:
        # Add a task (validation is triggered automatically)
        task = TaskModel(
            url="https://example.com/1",
            priority=1,
            meta={"depth": 2}
        )

        if await queue.put(task):
            print(f"Task added: {task.url}")

        # Fetch a task
        if result := await queue.get(timeout=2.0):
            print(f"Got task: {result.url} (priority={result.priority})")
            print(f"Meta: {result.meta}")


if __name__ == "__main__":
    asyncio.run(demo())
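The deleted DistributedPriorityQueue above stores each task in a Redis sorted set with its priority as the score and pops with BZPOPMAX, which returns the highest score first; the newly added crawlo/queue/redis_priority_queue.py presumably takes over this role, although its contents are not shown in this section. A minimal, self-contained sketch of that ordering (plain redis-py with a hypothetical key name), matching the priorities 5, 3, 8 used in tests/test_scheduler.py:

import asyncio
from redis.asyncio import from_url


async def demo_priority_order(redis_url: str = "redis://localhost:6379/0") -> None:
    # Score == priority: BZPOPMAX pops the highest score first, so 8, then 5, then 3.
    redis = from_url(redis_url, decode_responses=True)
    key = "demo:priority_queue"  # hypothetical key, not one crawlo itself uses
    try:
        await redis.zadd(key, {"task-a": 5, "task-b": 3, "task-c": 8})
        for _ in range(3):
            popped = await redis.bzpopmax(key, timeout=1)
            if popped:
                _, member, score = popped
                print(f"popped {member} (priority {int(score)})")
    finally:
        await redis.aclose()


if __name__ == "__main__":
    asyncio.run(demo_priority_order())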