crawlo 1.0.4-py3-none-any.whl → 1.0.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (112)
  1. crawlo/__init__.py +25 -9
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +41 -0
  4. crawlo/commands/__init__.py +10 -0
  5. crawlo/commands/genspider.py +111 -0
  6. crawlo/commands/run.py +149 -0
  7. crawlo/commands/startproject.py +101 -0
  8. crawlo/core/__init__.py +2 -2
  9. crawlo/core/engine.py +158 -158
  10. crawlo/core/processor.py +40 -40
  11. crawlo/core/scheduler.py +57 -57
  12. crawlo/crawler.py +219 -242
  13. crawlo/downloader/__init__.py +78 -78
  14. crawlo/downloader/aiohttp_downloader.py +200 -259
  15. crawlo/downloader/cffi_downloader.py +277 -0
  16. crawlo/downloader/httpx_downloader.py +246 -187
  17. crawlo/event.py +11 -11
  18. crawlo/exceptions.py +78 -64
  19. crawlo/extension/__init__.py +31 -31
  20. crawlo/extension/log_interval.py +49 -49
  21. crawlo/extension/log_stats.py +44 -44
  22. crawlo/extension/logging_extension.py +35 -0
  23. crawlo/filters/__init__.py +37 -37
  24. crawlo/filters/aioredis_filter.py +150 -150
  25. crawlo/filters/memory_filter.py +202 -202
  26. crawlo/items/__init__.py +22 -62
  27. crawlo/items/base.py +31 -0
  28. crawlo/items/fields.py +54 -0
  29. crawlo/items/items.py +105 -119
  30. crawlo/middleware/__init__.py +21 -21
  31. crawlo/middleware/default_header.py +32 -32
  32. crawlo/middleware/download_delay.py +28 -28
  33. crawlo/middleware/middleware_manager.py +135 -140
  34. crawlo/middleware/proxy.py +246 -0
  35. crawlo/middleware/request_ignore.py +30 -30
  36. crawlo/middleware/response_code.py +18 -18
  37. crawlo/middleware/response_filter.py +26 -26
  38. crawlo/middleware/retry.py +90 -90
  39. crawlo/network/__init__.py +7 -7
  40. crawlo/network/request.py +203 -204
  41. crawlo/network/response.py +166 -166
  42. crawlo/pipelines/__init__.py +13 -13
  43. crawlo/pipelines/console_pipeline.py +39 -39
  44. crawlo/pipelines/mongo_pipeline.py +116 -116
  45. crawlo/pipelines/mysql_batch_pipline.py +273 -134
  46. crawlo/pipelines/mysql_pipeline.py +195 -195
  47. crawlo/pipelines/pipeline_manager.py +56 -56
  48. crawlo/settings/__init__.py +7 -7
  49. crawlo/settings/default_settings.py +169 -94
  50. crawlo/settings/setting_manager.py +99 -99
  51. crawlo/spider/__init__.py +41 -36
  52. crawlo/stats_collector.py +59 -59
  53. crawlo/subscriber.py +106 -106
  54. crawlo/task_manager.py +27 -27
  55. crawlo/templates/crawlo.cfg.tmpl +11 -0
  56. crawlo/templates/project/__init__.py.tmpl +4 -0
  57. crawlo/templates/project/items.py.tmpl +18 -0
  58. crawlo/templates/project/middlewares.py.tmpl +76 -0
  59. crawlo/templates/project/pipelines.py.tmpl +64 -0
  60. crawlo/templates/project/settings.py.tmpl +54 -0
  61. crawlo/templates/project/spiders/__init__.py.tmpl +6 -0
  62. crawlo/templates/spider/spider.py.tmpl +32 -0
  63. crawlo/utils/__init__.py +7 -7
  64. crawlo/utils/concurrency_manager.py +124 -124
  65. crawlo/utils/date_tools.py +233 -177
  66. crawlo/utils/db_helper.py +344 -0
  67. crawlo/utils/func_tools.py +82 -82
  68. crawlo/utils/log.py +129 -39
  69. crawlo/utils/pqueue.py +173 -173
  70. crawlo/utils/project.py +199 -59
  71. crawlo/utils/request.py +267 -122
  72. crawlo/utils/spider_loader.py +63 -0
  73. crawlo/utils/system.py +11 -11
  74. crawlo/utils/tools.py +5 -303
  75. crawlo/utils/url.py +39 -39
  76. {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/METADATA +49 -48
  77. crawlo-1.0.6.dist-info/RECORD +94 -0
  78. crawlo-1.0.6.dist-info/entry_points.txt +2 -0
  79. {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/top_level.txt +1 -0
  80. examples/gxb/items.py +36 -0
  81. examples/gxb/run.py +16 -0
  82. examples/gxb/settings.py +72 -0
  83. examples/gxb/spider/__init__.py +0 -0
  84. examples/gxb/spider/miit_spider.py +180 -0
  85. examples/gxb/spider/telecom_device.py +129 -0
  86. tests/__init__.py +7 -7
  87. tests/test_proxy_health_check.py +33 -0
  88. tests/test_proxy_middleware_integration.py +137 -0
  89. tests/test_proxy_providers.py +57 -0
  90. tests/test_proxy_stats.py +20 -0
  91. tests/test_proxy_strategies.py +60 -0
  92. crawlo/downloader/playwright_downloader.py +0 -161
  93. crawlo/templates/item_template.tmpl +0 -22
  94. crawlo/templates/project_template/main.py +0 -33
  95. crawlo/templates/project_template/setting.py +0 -190
  96. crawlo/templates/spider_template.tmpl +0 -31
  97. crawlo-1.0.4.dist-info/RECORD +0 -79
  98. crawlo-1.0.4.dist-info/entry_points.txt +0 -2
  99. tests/baidu_spider/__init__.py +0 -7
  100. tests/baidu_spider/demo.py +0 -94
  101. tests/baidu_spider/items.py +0 -25
  102. tests/baidu_spider/middleware.py +0 -49
  103. tests/baidu_spider/pipeline.py +0 -55
  104. tests/baidu_spider/request_fingerprints.txt +0 -9
  105. tests/baidu_spider/run.py +0 -27
  106. tests/baidu_spider/settings.py +0 -80
  107. tests/baidu_spider/spiders/__init__.py +0 -7
  108. tests/baidu_spider/spiders/bai_du.py +0 -61
  109. tests/baidu_spider/spiders/sina.py +0 -79
  110. {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/WHEEL +0 -0
  111. {crawlo/templates/project_template/items → examples}/__init__.py +0 -0
  112. {crawlo/templates/project_template/spiders → examples/gxb}/__init__.py +0 -0
crawlo/pipelines/mongo_pipeline.py
@@ -1,117 +1,117 @@
- # -*- coding: utf-8 -*-
- from typing import Optional
- from motor.motor_asyncio import AsyncIOMotorClient
- from pymongo.errors import PyMongoError
- from crawlo.utils.log import get_logger
- from crawlo.exceptions import ItemDiscard
-
-
- class MongoPipeline:
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self.settings = crawler.settings
-         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
-
-         # Initialize connection handles
-         self.client = None
-         self.db = None
-         self.collection = None
-
-         # Configuration defaults
-         self.mongo_uri = self.settings.get('MONGO_URI', 'mongodb://localhost:27017')
-         self.db_name = self.settings.get('MONGO_DATABASE', 'scrapy_db')
-         self.collection_name = self.settings.get('MONGO_COLLECTION', crawler.spider.name)
-
-         # Register the shutdown event
-         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
-
-     @classmethod
-     def from_crawler(cls, crawler):
-         return cls(crawler)
-
-     async def _ensure_connection(self):
-         """Ensure the connection has been established"""
-         if self.client is None:
-             self.client = AsyncIOMotorClient(self.mongo_uri)
-             self.db = self.client[self.db_name]
-             self.collection = self.db[self.collection_name]
-             self.logger.info(f"MongoDB connection established (collection: {self.collection_name})")
-
-     async def process_item(self, item, spider) -> Optional[dict]:
-         """Core item-processing method"""
-         try:
-             await self._ensure_connection()
-
-             item_dict = dict(item)
-             result = await self.collection.insert_one(item_dict)
-
-             # Update stats counters
-             self.crawler.stats.inc_value('mongodb/inserted')
-             self.logger.debug(f"Inserted document ID: {result.inserted_id}")
-
-             return item
-
-         except Exception as e:
-             self.crawler.stats.inc_value('mongodb/failed')
-             self.logger.error(f"MongoDB insert failed: {e}")
-             raise ItemDiscard(f"MongoDB operation failed: {e}")
-
-     async def spider_closed(self):
-         """Clean up resources when the spider closes"""
-         if self.client:
-             self.client.close()
-             self.logger.info("MongoDB connection closed")
-
-
- class MongoPoolPipeline:
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self.settings = crawler.settings
-         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
-
-         # Connection pool configuration
-         self.client = AsyncIOMotorClient(
-             self.settings.get('MONGO_URI', 'mongodb://localhost:27017'),
-             maxPoolSize=self.settings.getint('MONGO_MAX_POOL_SIZE', 100),
-             minPoolSize=self.settings.getint('MONGO_MIN_POOL_SIZE', 10),
-             connectTimeoutMS=5000,
-             socketTimeoutMS=30000
-         )
-
-         self.db = self.client[self.settings.get('MONGO_DATABASE', 'scrapy_db')]
-         self.collection = self.db[self.settings.get('MONGO_COLLECTION', crawler.spider.name)]
-
-         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
-         self.logger.info(f"MongoDB connection pool initialized (collection: {self.collection.name})")
-
-     @classmethod
-     def create_instance(cls, crawler):
-         return cls(crawler)
-
-     async def process_item(self, item, spider) -> Optional[dict]:
-         """Process an item (with retry logic)"""
-         try:
-             item_dict = dict(item)
-
-             # Insert with retries
-             for attempt in range(3):
-                 try:
-                     result = await self.collection.insert_one(item_dict)
-                     self.crawler.stats.inc_value('mongodb/insert_success')
-                     self.logger.debug(f"Insert succeeded [attempt {attempt + 1}]: {result.inserted_id}")
-                     return item
-                 except PyMongoError as e:
-                     if attempt == 2:  # last attempt still failed
-                         raise
-                     self.logger.warning(f"Retrying insert [attempt {attempt + 1}]: {e}")
-
-         except Exception as e:
-             self.crawler.stats.inc_value('mongodb/insert_failed')
-             self.logger.error(f"MongoDB operation failed after retries: {e}")
-             raise ItemDiscard(f"MongoDB operation failed: {e}")
-
-     async def spider_closed(self):
-         """Resource cleanup"""
-         if hasattr(self, 'client'):
-             self.client.close()
+ # -*- coding: utf-8 -*-
+ from typing import Optional
+ from motor.motor_asyncio import AsyncIOMotorClient
+ from pymongo.errors import PyMongoError
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import ItemDiscard
+
+
+ class MongoPipeline:
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # Initialize connection handles
+         self.client = None
+         self.db = None
+         self.collection = None
+
+         # Configuration defaults
+         self.mongo_uri = self.settings.get('MONGO_URI', 'mongodb://localhost:27017')
+         self.db_name = self.settings.get('MONGO_DATABASE', 'scrapy_db')
+         self.collection_name = self.settings.get('MONGO_COLLECTION', crawler.spider.name)
+
+         # Register the shutdown event
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         return cls(crawler)
+
+     async def _ensure_connection(self):
+         """Ensure the connection has been established"""
+         if self.client is None:
+             self.client = AsyncIOMotorClient(self.mongo_uri)
+             self.db = self.client[self.db_name]
+             self.collection = self.db[self.collection_name]
+             self.logger.info(f"MongoDB connection established (collection: {self.collection_name})")
+
+     async def process_item(self, item, spider) -> Optional[dict]:
+         """Core item-processing method"""
+         try:
+             await self._ensure_connection()
+
+             item_dict = dict(item)
+             result = await self.collection.insert_one(item_dict)
+
+             # Update stats counters
+             self.crawler.stats.inc_value('mongodb/inserted')
+             self.logger.debug(f"Inserted document ID: {result.inserted_id}")
+
+             return item
+
+         except Exception as e:
+             self.crawler.stats.inc_value('mongodb/failed')
+             self.logger.error(f"MongoDB insert failed: {e}")
+             raise ItemDiscard(f"MongoDB operation failed: {e}")
+
+     async def spider_closed(self):
+         """Clean up resources when the spider closes"""
+         if self.client:
+             self.client.close()
+             self.logger.info("MongoDB connection closed")
+
+
+ class MongoPoolPipeline:
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # Connection pool configuration
+         self.client = AsyncIOMotorClient(
+             self.settings.get('MONGO_URI', 'mongodb://localhost:27017'),
+             maxPoolSize=self.settings.getint('MONGO_MAX_POOL_SIZE', 100),
+             minPoolSize=self.settings.getint('MONGO_MIN_POOL_SIZE', 10),
+             connectTimeoutMS=5000,
+             socketTimeoutMS=30000
+         )
+
+         self.db = self.client[self.settings.get('MONGO_DATABASE', 'scrapy_db')]
+         self.collection = self.db[self.settings.get('MONGO_COLLECTION', crawler.spider.name)]
+
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+         self.logger.info(f"MongoDB connection pool initialized (collection: {self.collection.name})")
+
+     @classmethod
+     def create_instance(cls, crawler):
+         return cls(crawler)
+
+     async def process_item(self, item, spider) -> Optional[dict]:
+         """Process an item (with retry logic)"""
+         try:
+             item_dict = dict(item)
+
+             # Insert with retries
+             for attempt in range(3):
+                 try:
+                     result = await self.collection.insert_one(item_dict)
+                     self.crawler.stats.inc_value('mongodb/insert_success')
+                     self.logger.debug(f"Insert succeeded [attempt {attempt + 1}]: {result.inserted_id}")
+                     return item
+                 except PyMongoError as e:
+                     if attempt == 2:  # last attempt still failed
+                         raise
+                     self.logger.warning(f"Retrying insert [attempt {attempt + 1}]: {e}")
+
+         except Exception as e:
+             self.crawler.stats.inc_value('mongodb/insert_failed')
+             self.logger.error(f"MongoDB operation failed after retries: {e}")
+             raise ItemDiscard(f"MongoDB operation failed: {e}")
+
+     async def spider_closed(self):
+         """Resource cleanup"""
+         if hasattr(self, 'client'):
+             self.client.close()
              self.logger.info("MongoDB connection pool released")
crawlo/pipelines/mysql_batch_pipline.py
@@ -1,134 +1,273 @@
- import asyncio
- import logging
- from crawlo.exceptions import NotConfiguredError
-
- logger = logging.getLogger(__name__)
-
-
- class AsyncmyMySQLPipeline:
-     """
-     Asynchronous MySQL pipeline that stores scraped data in MySQL in batches
-     """
-
-     def __init__(self, host, port, user, password, db, batch_size, table_name):
-         self.host = host
-         self.port = port
-         self.user = user
-         self.password = password
-         self.db = db
-         self.batch_size = batch_size
-         self.table_name = table_name
-         self.buffer = []
-         self.pool = None
-         self.flush_task = None
-         self.insert_stmt = None
-
-     @classmethod
-     def from_crawler(cls, crawler):
-         """Read the database connection parameters from the crawler settings"""
-         batch_size = crawler.settings.get_int('MYSQL_BATCH_SIZE', 100)
-
-         return cls(
-             host=crawler.settings.get('MYSQL_HOST'),
-             port=crawler.settings.get('MYSQL_PORT'),
-             user=crawler.settings.get('MYSQL_USER'),
-             password=crawler.settings.get('MYSQL_PASSWORD'),
-             db=crawler.settings.get('MYSQL_DB'),
-             batch_size=batch_size,
-             table_name=crawler.settings.get('MYSQL_TABLE'),
-         )
-
-     async def open_spider(self, spider):
-         """Initialize the database connection pool when the spider starts"""
-         try:
-             import asyncmy
-             self.pool = await asyncmy.create_pool(
-                 host=self.host,
-                 port=self.port,
-                 user=self.user,
-                 password=self.password,
-                 db=self.db,
-                 autocommit=True,
-                 charset='utf8mb4',
-                 cursorclass=asyncmy.cursors.DictCursor
-             )
-             logger.info(f"MySQL connection pool created, host: {self.host}, database: {self.db}")
-
-             # Start the auto-flush task
-             self.flush_task = asyncio.create_task(self._auto_flush())
-
-         except Exception as e:
-             logger.error(f"Unable to create MySQL connection pool: {e}")
-             raise NotConfiguredError(f"MySQL connection failed: {e}")
-
-     async def process_item(self, item, spider):
-         """Add each scraped item to the buffer"""
-         self.buffer.append(dict(item))
-
-         # Flush automatically once the buffer reaches the batch size
-         if len(self.buffer) >= self.batch_size:
-             await self._flush_buffer()
-
-         return item
-
-     async def _flush_buffer(self):
-         """Write the buffered data to the database in one batch"""
-         if not self.buffer:
-             return
-
-         try:
-             async with self.pool.acquire() as conn:
-                 async with conn.cursor() as cursor:
-                     # Build the insert statement dynamically
-                     if not self.insert_stmt:
-                         columns = ', '.join(self.buffer[0].keys())
-                         placeholders = ', '.join(['%s'] * len(self.buffer[0]))
-                         self.insert_stmt = f"INSERT INTO {self.table_name} ({columns}) VALUES ({placeholders})"
-
-                     # Prepare the values for the batch insert
-                     values = [tuple(item.values()) for item in self.buffer]
-
-                     # Execute the batch insert
-                     await cursor.executemany(self.insert_stmt, values)
-                     logger.debug(f"Batch-inserted {len(values)} records into {self.table_name}")
-
-                     # Clear the buffer
-                     self.buffer.clear()
-
-         except Exception as e:
-             logger.error(f"Batch insert failed: {e}")
-             # Keep the data on error to avoid losing it
-             # A production setup would likely need a more elaborate error-handling strategy
-
-     async def _auto_flush(self):
-         """Flush the buffer periodically so data does not linger in memory"""
-         try:
-             while True:
-                 await asyncio.sleep(10)  # check every 10 seconds
-                 if self.buffer:
-                     await self._flush_buffer()
-         except asyncio.CancelledError:
-             logger.info("Auto-flush task cancelled")
-
-     async def spider_closed(self, spider):
-         """Clean up when the spider closes"""
-         try:
-             # Cancel the auto-flush task
-             if self.flush_task:
-                 self.flush_task.cancel()
-                 await self.flush_task  # wait for the cancellation to complete
-
-             # Make sure any remaining buffered data is written to the database
-             if self.buffer:
-                 await self._flush_buffer()
-
-         except asyncio.CancelledError:
-             logger.info("Auto-flush task was cancelled while the spider was closing")
-         except Exception as e:
-             logger.error(f"Error while closing the spider: {e}")
-         finally:
-             # Close the database connection pool
-             if self.pool:
-                 self.pool.close()
-                 await self.pool.wait_closed()
-                 logger.info("MySQL connection pool closed")
+ # -*- coding: utf-8 -*-
+ import asyncio
+ import aiomysql
+ from typing import Optional, List, Dict
+ from asyncmy import create_pool
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import ItemDiscard
+ from crawlo.utils.tools import make_insert_sql, logger
+
+
+ class AsyncmyMySQLPipeline:
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # Configuration parameters
+         self.table_name = (
+             self.settings.get('MYSQL_TABLE') or
+             getattr(crawler.spider, 'mysql_table', None) or
+             f"{crawler.spider.name}_items"
+         )
+         self.batch_size = self.settings.getint('MYSQL_BATCH_SIZE', 100)
+         self.flush_interval = self.settings.getfloat('MYSQL_FLUSH_INTERVAL', 3.0)  # seconds
+
+         # Connection pool state
+         self._pool_lock = asyncio.Lock()
+         self._pool_initialized = False
+         self.pool = None
+
+         # Buffer and lock
+         self.items_buffer: List[Dict] = []
+         self.buffer_lock = asyncio.Lock()
+
+         # Background task
+         self.flush_task: Optional[asyncio.Task] = None
+
+         # Register the shutdown event
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         return cls(crawler)
+
+     async def _ensure_pool(self):
+         """Ensure the connection pool is initialized (thread-safe)"""
+         if self._pool_initialized:
+             return
+
+         async with self._pool_lock:
+             if not self._pool_initialized:
+                 try:
+                     self.pool = await create_pool(
+                         host=self.settings.get('MYSQL_HOST', 'localhost'),
+                         port=self.settings.get_int('MYSQL_PORT', 3306),
+                         user=self.settings.get('MYSQL_USER', 'root'),
+                         password=self.settings.get('MYSQL_PASSWORD', ''),
+                         db=self.settings.get('MYSQL_DB', 'scrapy_db'),
+                         minsize=self.settings.get_int('MYSQL_POOL_MIN', 3),
+                         maxsize=self.settings.get_int('MYSQL_POOL_MAX', 10),
+                         echo=self.settings.get_bool('MYSQL_ECHO', False)
+                     )
+                     self._pool_initialized = True
+                     self.logger.debug(f"MySQL connection pool initialized (table: {self.table_name})")
+                 except Exception as e:
+                     self.logger.error(f"MySQL connection pool initialization failed: {e}")
+                     raise
+
+     async def open_spider(self, spider):
+         """Start the background flush task when the spider opens"""
+         await self._ensure_pool()
+         self.flush_task = asyncio.create_task(self._flush_loop())
+
+     async def _flush_loop(self):
+         """Background loop: periodically check whether the buffer needs flushing"""
+         while True:
+             await asyncio.sleep(self.flush_interval)
+             if len(self.items_buffer) > 0:
+                 await self._flush_buffer()
+
+     async def _flush_buffer(self):
+         """Write the buffered data to the database in one batch"""
+         async with self.buffer_lock:
+             if not self.items_buffer:
+                 return
+
+             items_to_insert = self.items_buffer.copy()
+             self.items_buffer.clear()
+
+         try:
+             await self._ensure_pool()
+             first_item = items_to_insert[0]
+             sql = make_insert_sql(table=self.table_name, data=first_item, many=True)
+
+             values = [list(item.values()) for item in items_to_insert]
+
+             async with self.pool.acquire() as conn:
+                 async with conn.cursor() as cursor:
+                     affected_rows = await cursor.executemany(sql, values)
+                     await conn.commit()
+
+             spider_name = getattr(self.crawler.spider, 'name', 'unknown')
+             self.logger.info(f"Batch-inserted {affected_rows} records into {self.table_name}")
+             self.crawler.stats.inc_value('mysql/insert_success_batch', len(items_to_insert))
+
+         except Exception as e:
+             self.logger.error(f"Batch insert failed: {e}")
+             self.crawler.stats.inc_value('mysql/insert_failed_batch', len(items_to_insert))
+             # Optional: retry or discard
+             raise ItemDiscard(f"Batch insert failed: {e}")
+
+     async def process_item(self, item, spider, kwargs=None) -> dict:
+         """Add the item to the buffer and trigger batch inserts"""
+         item_dict = dict(item)
+
+         async with self.buffer_lock:
+             self.items_buffer.append(item_dict)
+         if len(self.items_buffer) >= self.batch_size:
+             # Batch threshold reached, flush immediately
+             await self._flush_buffer()
+
+         return item
+
+     async def spider_closed(self):
+         """Make sure all remaining data is written when the spider closes"""
+         if self.flush_task:
+             self.flush_task.cancel()
+             try:
+                 await self.flush_task
+             except asyncio.CancelledError:
+                 pass
+
+         # Flush the final batch of data
+         if self.items_buffer:
+             await self._flush_buffer()
+
+         # Close the connection pool
+         if self.pool:
+             self.pool.close()
+             await self.pool.wait_closed()
+             self.logger.info("MySQL connection pool closed")
+
+
+ class AiomysqlMySQLPipeline:
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # Configuration
+         self.table_name = (
+             self.settings.get('MYSQL_TABLE') or
+             getattr(crawler.spider, 'mysql_table', None) or
+             f"{crawler.spider.name}_items"
+         )
+         self.batch_size = self.settings.getint('MYSQL_BATCH_SIZE', 100)
+         self.flush_interval = self.settings.getfloat('MYSQL_FLUSH_INTERVAL', 3.0)
+
+         # Connection pool
+         self._pool_lock = asyncio.Lock()
+         self._pool_initialized = False
+         self.pool = None
+
+         # Buffer
+         self.items_buffer: List[Dict] = []
+         self.buffer_lock = asyncio.Lock()
+
+         # Background task
+         self.flush_task: Optional[asyncio.Task] = None
+
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def create_instance(cls, crawler):
+         return cls(crawler)
+
+     async def _init_pool(self):
+         """Lazily initialize the connection pool (thread-safe)"""
+         if self._pool_initialized:
+             return
+
+         async with self._pool_lock:
+             if not self._pool_initialized:
+                 try:
+                     self.pool = await aiomysql.create_pool(
+                         host=self.settings.get('MYSQL_HOST', 'localhost'),
+                         port=self.settings.getint('MYSQL_PORT', 3306),
+                         user=self.settings.get('MYSQL_USER', 'root'),
+                         password=self.settings.get('MYSQL_PASSWORD', ''),
+                         db=self.settings.get('MYSQL_DB', 'scrapy_db'),
+                         minsize=self.settings.getint('MYSQL_POOL_MIN', 3),
+                         maxsize=self.settings.getint('MYSQL_POOL_MAX', 10),
+                         cursorclass=aiomysql.DictCursor,
+                         autocommit=False
+                     )
+                     self._pool_initialized = True
+                     self.logger.debug(f"aiomysql connection pool initialized (table: {self.table_name})")
+                 except Exception as e:
+                     self.logger.error(f"aiomysql connection pool initialization failed: {e}")
+                     raise
+
+     async def open_spider(self, spider):
+         """Create the background flush task when the spider opens"""
+         await self._init_pool()
+         self.flush_task = asyncio.create_task(self._flush_loop())
+
+     async def _flush_loop(self):
+         """Flush the buffer periodically"""
+         while True:
+             await asyncio.sleep(self.flush_interval)
+             if len(self.items_buffer) > 0:
+                 await self._flush_buffer()
+
+     async def _flush_buffer(self):
+         """Perform the batch insert"""
+         async with self.buffer_lock:
+             if not self.items_buffer:
+                 return
+             items_to_insert = self.items_buffer.copy()
+             self.items_buffer.clear()
+
+         try:
+             await self._init_pool()
+             keys = items_to_insert[0].keys()
+             placeholders = ', '.join(['%s'] * len(keys))
+             columns = ', '.join([f'`{k}`' for k in keys])
+             sql = f"INSERT INTO `{self.table_name}` ({columns}) VALUES ({placeholders})"
+
+             values = [list(item.values()) for item in items_to_insert]
+
+             async with self.pool.acquire() as conn:
+                 async with conn.cursor() as cursor:
+                     result = await cursor.executemany(sql, values)
+                     await conn.commit()
+
+             spider_name = getattr(self.crawler.spider, 'name', 'unknown')
+             self.logger.info(f"[{spider_name}] Batch-inserted {result} records into {self.table_name}")
+             self.crawler.stats.inc_value('mysql/insert_success_batch', len(items_to_insert))
+
+         except aiomysql.Error as e:
+             self.logger.error(f"aiomysql batch insert failed: {e}")
+             self.crawler.stats.inc_value('mysql/insert_failed_batch', len(items_to_insert))
+             raise ItemDiscard(f"MySQL error: {e.args[1]}")
+         except Exception as e:
+             self.logger.error(f"Unknown error: {e}")
+             raise ItemDiscard(f"Processing failed: {e}")
+
+     async def process_item(self, item, spider) -> dict:
+         item_dict = dict(item)
+
+         async with self.buffer_lock:
+             self.items_buffer.append(item_dict)
+         if len(self.items_buffer) >= self.batch_size:
+             await self._flush_buffer()
+
+         return item
+
+     async def spider_closed(self):
+         """Clean up resources and commit any remaining data"""
+         if self.flush_task:
+             self.flush_task.cancel()
+             try:
+                 await self.flush_task
+             except asyncio.CancelledError:
+                 pass
+
+         if self.items_buffer:
+             await self._flush_buffer()
+
+         if self.pool:
+             self.pool.close()
+             await self.pool.wait_closed()
+             self.logger.info("aiomysql connection pool released")