crawlo-1.1.1-py3-none-any.whl → crawlo-1.1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (128)
  1. crawlo/__init__.py +34 -33
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +152 -126
  7. crawlo/commands/list.py +156 -147
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -111
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +187 -0
  12. crawlo/config.py +280 -0
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -158
  15. crawlo/core/enhanced_engine.py +190 -0
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +166 -57
  18. crawlo/crawler.py +1028 -495
  19. crawlo/downloader/__init__.py +242 -78
  20. crawlo/downloader/aiohttp_downloader.py +212 -199
  21. crawlo/downloader/cffi_downloader.py +251 -241
  22. crawlo/downloader/httpx_downloader.py +259 -246
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +82 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -37
  30. crawlo/filters/aioredis_filter.py +242 -150
  31. crawlo/filters/memory_filter.py +269 -202
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -245
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -90
  45. crawlo/mode_manager.py +201 -0
  46. crawlo/network/__init__.py +21 -7
  47. crawlo/network/request.py +311 -203
  48. crawlo/network/response.py +271 -166
  49. crawlo/pipelines/__init__.py +22 -13
  50. crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
  51. crawlo/pipelines/console_pipeline.py +39 -39
  52. crawlo/pipelines/csv_pipeline.py +317 -0
  53. crawlo/pipelines/database_dedup_pipeline.py +225 -0
  54. crawlo/pipelines/json_pipeline.py +219 -0
  55. crawlo/pipelines/memory_dedup_pipeline.py +116 -0
  56. crawlo/pipelines/mongo_pipeline.py +116 -116
  57. crawlo/pipelines/mysql_pipeline.py +195 -195
  58. crawlo/pipelines/pipeline_manager.py +56 -56
  59. crawlo/pipelines/redis_dedup_pipeline.py +163 -0
  60. crawlo/project.py +153 -153
  61. crawlo/queue/__init__.py +0 -0
  62. crawlo/queue/pqueue.py +37 -0
  63. crawlo/queue/queue_manager.py +308 -0
  64. crawlo/queue/redis_priority_queue.py +209 -0
  65. crawlo/settings/__init__.py +7 -7
  66. crawlo/settings/default_settings.py +245 -167
  67. crawlo/settings/setting_manager.py +99 -99
  68. crawlo/spider/__init__.py +639 -129
  69. crawlo/stats_collector.py +59 -59
  70. crawlo/subscriber.py +106 -106
  71. crawlo/task_manager.py +30 -27
  72. crawlo/templates/crawlo.cfg.tmpl +10 -10
  73. crawlo/templates/project/__init__.py.tmpl +3 -3
  74. crawlo/templates/project/items.py.tmpl +17 -17
  75. crawlo/templates/project/middlewares.py.tmpl +87 -76
  76. crawlo/templates/project/pipelines.py.tmpl +342 -64
  77. crawlo/templates/project/run.py.tmpl +252 -0
  78. crawlo/templates/project/settings.py.tmpl +251 -54
  79. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  80. crawlo/templates/spider/spider.py.tmpl +178 -32
  81. crawlo/utils/__init__.py +7 -7
  82. crawlo/utils/controlled_spider_mixin.py +440 -0
  83. crawlo/utils/date_tools.py +233 -233
  84. crawlo/utils/db_helper.py +343 -343
  85. crawlo/utils/func_tools.py +82 -82
  86. crawlo/utils/large_scale_config.py +287 -0
  87. crawlo/utils/large_scale_helper.py +344 -0
  88. crawlo/utils/log.py +128 -128
  89. crawlo/utils/queue_helper.py +176 -0
  90. crawlo/utils/request.py +267 -267
  91. crawlo/utils/request_serializer.py +220 -0
  92. crawlo/utils/spider_loader.py +62 -62
  93. crawlo/utils/system.py +11 -11
  94. crawlo/utils/tools.py +4 -4
  95. crawlo/utils/url.py +39 -39
  96. crawlo-1.1.3.dist-info/METADATA +635 -0
  97. crawlo-1.1.3.dist-info/RECORD +113 -0
  98. examples/__init__.py +7 -7
  99. examples/controlled_spider_example.py +205 -0
  100. tests/__init__.py +7 -7
  101. tests/test_final_validation.py +154 -0
  102. tests/test_proxy_health_check.py +32 -32
  103. tests/test_proxy_middleware_integration.py +136 -136
  104. tests/test_proxy_providers.py +56 -56
  105. tests/test_proxy_stats.py +19 -19
  106. tests/test_proxy_strategies.py +59 -59
  107. tests/test_redis_config.py +29 -0
  108. tests/test_redis_queue.py +225 -0
  109. tests/test_request_serialization.py +71 -0
  110. tests/test_scheduler.py +242 -0
  111. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  112. crawlo/utils/pqueue.py +0 -174
  113. crawlo-1.1.1.dist-info/METADATA +0 -220
  114. crawlo-1.1.1.dist-info/RECORD +0 -100
  115. examples/baidu_spider/__init__.py +0 -7
  116. examples/baidu_spider/demo.py +0 -94
  117. examples/baidu_spider/items.py +0 -46
  118. examples/baidu_spider/middleware.py +0 -49
  119. examples/baidu_spider/pipeline.py +0 -55
  120. examples/baidu_spider/run.py +0 -27
  121. examples/baidu_spider/settings.py +0 -121
  122. examples/baidu_spider/spiders/__init__.py +0 -7
  123. examples/baidu_spider/spiders/bai_du.py +0 -61
  124. examples/baidu_spider/spiders/miit.py +0 -159
  125. examples/baidu_spider/spiders/sina.py +0 -79
  126. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
  127. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
  128. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
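
Several of the entries above are new or rewritten item pipelines (mysql_pipeline.py, pipeline_manager.py, and the added *_dedup_pipeline modules). As the PipelineManager hunk below shows, pipelines are activated through a PIPELINES settings list and instantiated via from_crawler. A minimal sketch of what such a configuration might look like in a project's settings.py; the class paths are inferred from the module names in this list and are not taken from project documentation:

# settings.py -- hypothetical example; class paths inferred from the file list above
PIPELINES = [
    'crawlo.pipelines.redis_dedup_pipeline.RedisDedupPipeline',   # new in 1.1.3
    'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',
]
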
crawlo/pipelines/mysql_pipeline.py
@@ -1,195 +1,195 @@
- # -*- coding: utf-8 -*-
- import asyncio
- import aiomysql
- from typing import Optional
- from asyncmy import create_pool
- from crawlo.utils.log import get_logger
- from crawlo.exceptions import ItemDiscard
- from crawlo.utils.db_helper import make_insert_sql, logger
-
-
- class AsyncmyMySQLPipeline:
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self.settings = crawler.settings
-         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
-
-         # Use an async lock and an init flag to keep pool creation thread-safe
-         self._pool_lock = asyncio.Lock()
-         self._pool_initialized = False
-         self.pool = None
-         self.table_name = (
-             self.settings.get('MYSQL_TABLE') or
-             getattr(crawler.spider, 'mysql_table', None) or
-             f"{crawler.spider.name}_items"
-         )
-
-         # Register the shutdown event
-         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
-
-     @classmethod
-     def from_crawler(cls, crawler):
-         return cls(crawler)
-
-     async def _ensure_pool(self):
-         """Ensure the connection pool is initialized (thread-safe)."""
-         if self._pool_initialized:
-             return
-
-         async with self._pool_lock:
-             if not self._pool_initialized:  # double-check to avoid a race condition
-                 try:
-                     self.pool = await create_pool(
-                         host=self.settings.get('MYSQL_HOST', 'localhost'),
-                         port=self.settings.get_int('MYSQL_PORT', 3306),
-                         user=self.settings.get('MYSQL_USER', 'root'),
-                         password=self.settings.get('MYSQL_PASSWORD', ''),
-                         db=self.settings.get('MYSQL_DB', 'scrapy_db'),
-                         minsize=self.settings.get_int('MYSQL_POOL_MIN', 3),
-                         maxsize=self.settings.get_int('MYSQL_POOL_MAX', 10),
-                         echo=self.settings.get_bool('MYSQL_ECHO', False)
-                     )
-                     self._pool_initialized = True
-                     self.logger.debug(f"MySQL connection pool initialized (table: {self.table_name})")
-                 except Exception as e:
-                     self.logger.error(f"Failed to initialize MySQL connection pool: {e}")
-                     raise
-
-     async def process_item(self, item, spider, kwargs=None) -> Optional[dict]:
-         """Core item-processing method."""
-         kwargs = kwargs or {}
-         spider_name = getattr(spider, 'name', 'unknown')  # get the spider name
-         try:
-             await self._ensure_pool()
-             item_dict = dict(item)
-             sql = make_insert_sql(table=self.table_name, data=item_dict, **kwargs)
-
-             rowcount = await self._execute_sql(sql=sql)
-             if rowcount > 1:
-                 self.logger.info(
-                     f"Spider {spider_name} inserted {rowcount} rows into table {self.table_name}"
-                 )
-             elif rowcount == 1:
-                 self.logger.debug(
-                     f"Spider {spider_name} inserted a single row into table {self.table_name}"
-                 )
-             else:
-                 self.logger.warning(
-                     f"Spider {spider_name}: SQL executed but no new rows were inserted - {sql[:100]}..."
-                 )
-
-             return item
-
-         except Exception as e:
-             self.logger.error(f"Error while processing item: {e}")
-             raise ItemDiscard(f"Processing failed: {e}")
-
-     async def _execute_sql(self, sql: str, values: list = None) -> int:
-         """Execute the SQL statement and handle the result."""
-         async with self.pool.acquire() as conn:
-             async with conn.cursor() as cursor:
-                 try:
-                     # Choose the execution call depending on whether parameter values were supplied
-                     if values is not None:
-                         rowcount = await cursor.execute(sql, values)
-                     else:
-                         rowcount = await cursor.execute(sql)
-
-                     await conn.commit()
-                     self.crawler.stats.inc_value('mysql/insert_success')
-                     return rowcount
-                 except Exception as e:
-                     await conn.rollback()
-                     self.crawler.stats.inc_value('mysql/insert_failed')
-                     raise ItemDiscard(f"MySQL insert failed: {e}")
-
-     async def spider_closed(self):
-         """Clean up resources when the spider closes."""
-         if self.pool:
-             self.pool.close()
-             await self.pool.wait_closed()
-             self.logger.info("MySQL connection pool closed")
-
-
- class AiomysqlMySQLPipeline:
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self.settings = crawler.settings
-         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
-
-         # Async lock and init flag
-         self._pool_lock = asyncio.Lock()
-         self._pool_initialized = False
-         self.pool = None
-         self.table_name = (
-             self.settings.get('MYSQL_TABLE') or
-             getattr(crawler.spider, 'mysql_table', None) or
-             f"{crawler.spider.name}_items"
-         )
-
-         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
-
-     @classmethod
-     def create_instance(cls, crawler):
-         return cls(crawler)
-
-     async def _init_pool(self):
-         """Lazily initialize the connection pool (thread-safe)."""
-         if self._pool_initialized:
-             return
-
-         async with self._pool_lock:
-             if not self._pool_initialized:
-                 try:
-                     self.pool = await aiomysql.create_pool(
-                         host=self.settings.get('MYSQL_HOST', 'localhost'),
-                         port=self.settings.getint('MYSQL_PORT', 3306),
-                         user=self.settings.get('MYSQL_USER', 'root'),
-                         password=self.settings.get('MYSQL_PASSWORD', ''),
-                         db=self.settings.get('MYSQL_DB', 'scrapy_db'),
-                         minsize=self.settings.getint('MYSQL_POOL_MIN', 2),
-                         maxsize=self.settings.getint('MYSQL_POOL_MAX', 5),
-                         cursorclass=aiomysql.DictCursor,
-                         autocommit=False
-                     )
-                     self._pool_initialized = True
-                     self.logger.debug(f"aiomysql connection pool initialized (table: {self.table_name})")
-                 except Exception as e:
-                     self.logger.error(f"Failed to initialize aiomysql connection pool: {e}")
-                     raise
-
-     async def process_item(self, item, spider) -> Optional[dict]:
-         """Item-processing method."""
-         try:
-             await self._init_pool()
-
-             item_dict = dict(item)
-             sql = f"""
-                 INSERT INTO `{self.table_name}`
-                 ({', '.join([f'`{k}`' for k in item_dict.keys()])})
-                 VALUES ({', '.join(['%s'] * len(item_dict))})
-             """
-
-             async with self.pool.acquire() as conn:
-                 async with conn.cursor() as cursor:
-                     try:
-                         await cursor.execute(sql, list(item_dict.values()))
-                         await conn.commit()
-                         self.crawler.stats.inc_value('mysql/insert_success')
-                     except aiomysql.Error as e:
-                         await conn.rollback()
-                         self.crawler.stats.inc_value('mysql/insert_failed')
-                         raise ItemDiscard(f"MySQL error: {e.args[1]}")
-
-             return item
-
-         except Exception as e:
-             self.logger.error(f"Pipeline processing error: {e}")
-             raise ItemDiscard(f"Processing failed: {e}")
-
-     async def spider_closed(self):
-         """Resource cleanup."""
-         if self.pool:
-             self.pool.close()
-             await self.pool.wait_closed()
-             self.logger.info("aiomysql connection pool released")
+ # -*- coding: utf-8 -*-
+ import asyncio
+ import aiomysql
+ from typing import Optional
+ from asyncmy import create_pool
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import ItemDiscard
+ from crawlo.utils.db_helper import make_insert_sql, logger
+
+
+ class AsyncmyMySQLPipeline:
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # Use an async lock and an init flag to keep pool creation thread-safe
+         self._pool_lock = asyncio.Lock()
+         self._pool_initialized = False
+         self.pool = None
+         self.table_name = (
+             self.settings.get('MYSQL_TABLE') or
+             getattr(crawler.spider, 'mysql_table', None) or
+             f"{crawler.spider.name}_items"
+         )
+
+         # Register the shutdown event
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         return cls(crawler)
+
+     async def _ensure_pool(self):
+         """Ensure the connection pool is initialized (thread-safe)."""
+         if self._pool_initialized:
+             return
+
+         async with self._pool_lock:
+             if not self._pool_initialized:  # double-check to avoid a race condition
+                 try:
+                     self.pool = await create_pool(
+                         host=self.settings.get('MYSQL_HOST', 'localhost'),
+                         port=self.settings.get_int('MYSQL_PORT', 3306),
+                         user=self.settings.get('MYSQL_USER', 'root'),
+                         password=self.settings.get('MYSQL_PASSWORD', ''),
+                         db=self.settings.get('MYSQL_DB', 'scrapy_db'),
+                         minsize=self.settings.get_int('MYSQL_POOL_MIN', 3),
+                         maxsize=self.settings.get_int('MYSQL_POOL_MAX', 10),
+                         echo=self.settings.get_bool('MYSQL_ECHO', False)
+                     )
+                     self._pool_initialized = True
+                     self.logger.debug(f"MySQL connection pool initialized (table: {self.table_name})")
+                 except Exception as e:
+                     self.logger.error(f"Failed to initialize MySQL connection pool: {e}")
+                     raise
+
+     async def process_item(self, item, spider, kwargs=None) -> Optional[dict]:
+         """Core item-processing method."""
+         kwargs = kwargs or {}
+         spider_name = getattr(spider, 'name', 'unknown')  # get the spider name
+         try:
+             await self._ensure_pool()
+             item_dict = dict(item)
+             sql = make_insert_sql(table=self.table_name, data=item_dict, **kwargs)
+
+             rowcount = await self._execute_sql(sql=sql)
+             if rowcount > 1:
+                 self.logger.info(
+                     f"Spider {spider_name} inserted {rowcount} rows into table {self.table_name}"
+                 )
+             elif rowcount == 1:
+                 self.logger.debug(
+                     f"Spider {spider_name} inserted a single row into table {self.table_name}"
+                 )
+             else:
+                 self.logger.warning(
+                     f"Spider {spider_name}: SQL executed but no new rows were inserted - {sql[:100]}..."
+                 )
+
+             return item
+
+         except Exception as e:
+             self.logger.error(f"Error while processing item: {e}")
+             raise ItemDiscard(f"Processing failed: {e}")
+
+     async def _execute_sql(self, sql: str, values: list = None) -> int:
+         """Execute the SQL statement and handle the result."""
+         async with self.pool.acquire() as conn:
+             async with conn.cursor() as cursor:
+                 try:
+                     # Choose the execution call depending on whether parameter values were supplied
+                     if values is not None:
+                         rowcount = await cursor.execute(sql, values)
+                     else:
+                         rowcount = await cursor.execute(sql)
+
+                     await conn.commit()
+                     self.crawler.stats.inc_value('mysql/insert_success')
+                     return rowcount
+                 except Exception as e:
+                     await conn.rollback()
+                     self.crawler.stats.inc_value('mysql/insert_failed')
+                     raise ItemDiscard(f"MySQL insert failed: {e}")
+
+     async def spider_closed(self):
+         """Clean up resources when the spider closes."""
+         if self.pool:
+             self.pool.close()
+             await self.pool.wait_closed()
+             self.logger.info("MySQL connection pool closed")
+
+
+ class AiomysqlMySQLPipeline:
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # Async lock and init flag
+         self._pool_lock = asyncio.Lock()
+         self._pool_initialized = False
+         self.pool = None
+         self.table_name = (
+             self.settings.get('MYSQL_TABLE') or
+             getattr(crawler.spider, 'mysql_table', None) or
+             f"{crawler.spider.name}_items"
+         )
+
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def create_instance(cls, crawler):
+         return cls(crawler)
+
+     async def _init_pool(self):
+         """Lazily initialize the connection pool (thread-safe)."""
+         if self._pool_initialized:
+             return
+
+         async with self._pool_lock:
+             if not self._pool_initialized:
+                 try:
+                     self.pool = await aiomysql.create_pool(
+                         host=self.settings.get('MYSQL_HOST', 'localhost'),
+                         port=self.settings.getint('MYSQL_PORT', 3306),
+                         user=self.settings.get('MYSQL_USER', 'root'),
+                         password=self.settings.get('MYSQL_PASSWORD', ''),
+                         db=self.settings.get('MYSQL_DB', 'scrapy_db'),
+                         minsize=self.settings.getint('MYSQL_POOL_MIN', 2),
+                         maxsize=self.settings.getint('MYSQL_POOL_MAX', 5),
+                         cursorclass=aiomysql.DictCursor,
+                         autocommit=False
+                     )
+                     self._pool_initialized = True
+                     self.logger.debug(f"aiomysql connection pool initialized (table: {self.table_name})")
+                 except Exception as e:
+                     self.logger.error(f"Failed to initialize aiomysql connection pool: {e}")
+                     raise
+
+     async def process_item(self, item, spider) -> Optional[dict]:
+         """Item-processing method."""
+         try:
+             await self._init_pool()
+
+             item_dict = dict(item)
+             sql = f"""
+                 INSERT INTO `{self.table_name}`
+                 ({', '.join([f'`{k}`' for k in item_dict.keys()])})
+                 VALUES ({', '.join(['%s'] * len(item_dict))})
+             """
+
+             async with self.pool.acquire() as conn:
+                 async with conn.cursor() as cursor:
+                     try:
+                         await cursor.execute(sql, list(item_dict.values()))
+                         await conn.commit()
+                         self.crawler.stats.inc_value('mysql/insert_success')
+                     except aiomysql.Error as e:
+                         await conn.rollback()
+                         self.crawler.stats.inc_value('mysql/insert_failed')
+                         raise ItemDiscard(f"MySQL error: {e.args[1]}")
+
+             return item
+
+         except Exception as e:
+             self.logger.error(f"Pipeline processing error: {e}")
+             raise ItemDiscard(f"Processing failed: {e}")
+
+     async def spider_closed(self):
+         """Resource cleanup."""
+         if self.pool:
+             self.pool.close()
+             await self.pool.wait_closed()
+             self.logger.info("aiomysql connection pool released")
crawlo/pipelines/pipeline_manager.py
@@ -1,56 +1,56 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from typing import List
- from pprint import pformat
- from asyncio import create_task
-
-
- from crawlo.utils.log import get_logger
- from crawlo.event import item_successful, item_discard
- from crawlo.project import load_class, common_call
- from crawlo.exceptions import PipelineInitError, ItemDiscard, InvalidOutputError
-
-
- class PipelineManager:
-
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self.pipelines: List = []
-         self.methods: List = []
-
-         self.logger = get_logger(self.__class__.__name__, self.crawler.settings.get('LOG_LEVEL'))
-         pipelines = self.crawler.settings.get_list('PIPELINES')
-         self._add_pipelines(pipelines)
-         self._add_methods()
-
-     @classmethod
-     def from_crawler(cls, *args, **kwargs):
-         o = cls(*args, **kwargs)
-         return o
-
-     def _add_pipelines(self, pipelines):
-         for pipeline in pipelines:
-             pipeline_cls = load_class(pipeline)
-             if not hasattr(pipeline_cls, 'from_crawler'):
-                 raise PipelineInitError(
-                     f"Pipeline init failed, must inherit from `BasePipeline` or have a `create_instance` method"
-                 )
-             self.pipelines.append(pipeline_cls.from_crawler(self.crawler))
-         if pipelines:
-             self.logger.info(f"enabled pipelines: \n {pformat(pipelines)}")
-
-     def _add_methods(self):
-         for pipeline in self.pipelines:
-             if hasattr(pipeline, 'process_item'):
-                 self.methods.append(pipeline.process_item)
-
-     async def process_item(self, item):
-         try:
-             for method in self.methods:
-                 item = await common_call(method, item, self.crawler.spider)
-                 if item is None:
-                     raise InvalidOutputError(f"{method.__qualname__} return None is not supported.")
-         except ItemDiscard as exc:
-             create_task(self.crawler.subscriber.notify(item_discard, item, exc, self.crawler.spider))
-         else:
-             create_task(self.crawler.subscriber.notify(item_successful, item, self.crawler.spider))
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from typing import List
+ from pprint import pformat
+ from asyncio import create_task
+
+
+ from crawlo.utils.log import get_logger
+ from crawlo.event import item_successful, item_discard
+ from crawlo.project import load_class, common_call
+ from crawlo.exceptions import PipelineInitError, ItemDiscard, InvalidOutputError
+
+
+ class PipelineManager:
+
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.pipelines: List = []
+         self.methods: List = []
+
+         self.logger = get_logger(self.__class__.__name__, self.crawler.settings.get('LOG_LEVEL'))
+         pipelines = self.crawler.settings.get_list('PIPELINES')
+         self._add_pipelines(pipelines)
+         self._add_methods()
+
+     @classmethod
+     def from_crawler(cls, *args, **kwargs):
+         o = cls(*args, **kwargs)
+         return o
+
+     def _add_pipelines(self, pipelines):
+         for pipeline in pipelines:
+             pipeline_cls = load_class(pipeline)
+             if not hasattr(pipeline_cls, 'from_crawler'):
+                 raise PipelineInitError(
+                     f"Pipeline init failed, must inherit from `BasePipeline` or have a `create_instance` method"
+                 )
+             self.pipelines.append(pipeline_cls.from_crawler(self.crawler))
+         if pipelines:
+             self.logger.info(f"enabled pipelines: \n {pformat(pipelines)}")
+
+     def _add_methods(self):
+         for pipeline in self.pipelines:
+             if hasattr(pipeline, 'process_item'):
+                 self.methods.append(pipeline.process_item)
+
+     async def process_item(self, item):
+         try:
+             for method in self.methods:
+                 item = await common_call(method, item, self.crawler.spider)
+                 if item is None:
+                     raise InvalidOutputError(f"{method.__qualname__} return None is not supported.")
+         except ItemDiscard as exc:
+             create_task(self.crawler.subscriber.notify(item_discard, item, exc, self.crawler.spider))
+         else:
+             create_task(self.crawler.subscriber.notify(item_successful, item, self.crawler.spider))
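
Per the code above, PipelineManager only requires that each configured pipeline class expose a from_crawler hook and a process_item callable that returns the item (raising ItemDiscard drops it; returning None triggers InvalidOutputError). A minimal sketch of a custom pipeline that satisfies this contract; the class name and the 'title' field check are purely illustrative:

from crawlo.exceptions import ItemDiscard


class RequireTitlePipeline:
    """Drops items whose 'title' field is empty (illustrative example only)."""

    def __init__(self, crawler):
        self.crawler = crawler

    @classmethod
    def from_crawler(cls, crawler):
        # PipelineManager checks for this hook before instantiating the pipeline
        return cls(crawler)

    async def process_item(self, item, spider):
        # Must return the (possibly modified) item; returning None is rejected by the manager
        if not dict(item).get('title'):
            raise ItemDiscard("missing title")
        return item
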
crawlo/pipelines/redis_dedup_pipeline.py
@@ -0,0 +1,163 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ Redis-based item de-duplication pipeline
+ ========================
+ Provides item de-duplication for distributed deployments and prevents duplicate records from being saved.
+
+ Features:
+ - Distributed: multiple nodes share the de-duplication data
+ - High performance: uses a Redis set for fast lookups
+ - Configurable: supports custom Redis connection parameters
+ - Fault tolerant: data is not lost on network errors
+ """
+
+ import hashlib
+ from typing import Dict, Any, Optional
+ import redis
+
+ from crawlo import Item
+ from crawlo.spider import Spider
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import DropItem
+
+
+ class RedisDedupPipeline:
+     """Redis-based item de-duplication pipeline"""
+
+     def __init__(
+         self,
+         redis_host: str = 'localhost',
+         redis_port: int = 6379,
+         redis_db: int = 0,
+         redis_password: Optional[str] = None,
+         redis_key: str = 'crawlo:item_fingerprints',
+         log_level: str = "INFO"
+     ):
+         """
+         Initialize the Redis de-duplication pipeline
+
+         :param redis_host: Redis host address
+         :param redis_port: Redis port
+         :param redis_db: Redis database number
+         :param redis_password: Redis password
+         :param redis_key: Redis key that stores the fingerprints
+         :param log_level: log level
+         """
+         self.logger = get_logger(self.__class__.__name__, log_level)
+
+         # Initialize the Redis connection
+         try:
+             self.redis_client = redis.Redis(
+                 host=redis_host,
+                 port=redis_port,
+                 db=redis_db,
+                 password=redis_password,
+                 decode_responses=True,
+                 socket_connect_timeout=5,
+                 socket_timeout=5
+             )
+             # Test the connection
+             self.redis_client.ping()
+             self.logger.info(f"Connected to Redis: {redis_host}:{redis_port}/{redis_db}")
+         except Exception as e:
+             self.logger.error(f"Redis connection failed: {e}")
+             raise RuntimeError(f"Redis connection failed: {e}")
+
+         self.redis_key = redis_key
+         self.dropped_count = 0
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         """Create a pipeline instance from the crawler settings"""
+         settings = crawler.settings
+
+         return cls(
+             redis_host=settings.get('REDIS_HOST', 'localhost'),
+             redis_port=settings.getint('REDIS_PORT', 6379),
+             redis_db=settings.getint('REDIS_DB', 0),
+             redis_password=settings.get('REDIS_PASSWORD') or None,
+             redis_key=settings.get('REDIS_DEDUP_KEY', 'crawlo:item_fingerprints'),
+             log_level=settings.get('LOG_LEVEL', 'INFO')
+         )
+
+     def process_item(self, item: Item, spider: Spider) -> Item:
+         """
+         Process an item and run the de-duplication check
+
+         :param item: the item to process
+         :param spider: the spider instance
+         :return: the processed item, or raises DropItem
+         """
+         try:
+             # Generate the item fingerprint
+             fingerprint = self._generate_item_fingerprint(item)
+
+             # Use the Redis SADD command to check and add the fingerprint:
+             # SADD returns 0 if the fingerprint already exists and 1 if it is new
+             is_new = self.redis_client.sadd(self.redis_key, fingerprint)
+
+             if not is_new:
+                 # The fingerprint already exists, so drop this item
+                 self.dropped_count += 1
+                 self.logger.debug(f"Dropping duplicate item: {fingerprint[:20]}...")
+                 raise DropItem(f"Duplicate item: {fingerprint}")
+             else:
+                 # New item, keep processing
+                 self.logger.debug(f"Processing new item: {fingerprint[:20]}...")
+                 return item
+
+         except redis.RedisError as e:
+             self.logger.error(f"Redis error: {e}")
+             # Keep processing on Redis errors to avoid losing data
+             return item
+         except Exception as e:
+             self.logger.error(f"Error while processing the item: {e}")
+             # Keep processing on other errors
+             return item
+
+     def _generate_item_fingerprint(self, item: Item) -> str:
+         """
+         Generate the item fingerprint
+
+         A unique fingerprint derived from all item fields, used for de-duplication.
+
+         :param item: the item
+         :return: fingerprint string
+         """
+         # Convert the item into a serializable dict
+         try:
+             item_dict = item.to_dict()
+         except AttributeError:
+             # Fall back for Item implementations without a to_dict method
+             item_dict = dict(item)
+
+         # Sort the dict entries for a stable ordering
+         sorted_items = sorted(item_dict.items())
+
+         # Build the fingerprint string
+         fingerprint_string = '|'.join([f"{k}={v}" for k, v in sorted_items if v is not None])
+
+         # Use SHA256 to produce a fixed-length fingerprint
+         return hashlib.sha256(fingerprint_string.encode('utf-8')).hexdigest()
+
+     def close_spider(self, spider: Spider) -> None:
+         """
+         Cleanup when the spider closes
+
+         :param spider: the spider instance
+         """
+         try:
+             # Collect de-duplication statistics
+             total_items = self.redis_client.scard(self.redis_key)
+             self.logger.info(f"Spider {spider.name} closed:")
+             self.logger.info(f"  - duplicate items dropped: {self.dropped_count}")
+             self.logger.info(f"  - fingerprints stored in Redis: {total_items}")
+
+             # Note: fingerprints in Redis are not cleared by default;
+             # enable cleanup in the settings if needed
+             if spider.crawler.settings.getbool('REDIS_DEDUP_CLEANUP', False):
+                 deleted = self.redis_client.delete(self.redis_key)
+                 self.logger.info(f"  - fingerprints cleared: {deleted}")
+         except Exception as e:
+             self.logger.error(f"Error while closing the spider: {e}")