crawlo 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.
Files changed (40)
  1. crawlo/__init__.py +1 -0
  2. crawlo/__version__.py +1 -1
  3. crawlo/core/engine.py +9 -7
  4. crawlo/core/processor.py +1 -1
  5. crawlo/core/scheduler.py +32 -8
  6. crawlo/crawler.py +133 -18
  7. crawlo/downloader/playwright_downloader.py +161 -0
  8. crawlo/extension/log_stats.py +4 -4
  9. crawlo/filters/__init__.py +37 -0
  10. crawlo/filters/aioredis_filter.py +130 -0
  11. crawlo/filters/memory_filter.py +203 -0
  12. crawlo/filters/redis_filter.py +120 -0
  13. crawlo/items/__init__.py +40 -2
  14. crawlo/items/items.py +36 -5
  15. crawlo/middleware/retry.py +8 -2
  16. crawlo/network/request.py +215 -33
  17. crawlo/network/response.py +122 -53
  18. crawlo/pipelines/console_pipeline.py +28 -8
  19. crawlo/pipelines/mongo_pipeline.py +114 -2
  20. crawlo/pipelines/mysql_batch_pipline.py +134 -0
  21. crawlo/pipelines/mysql_pipeline.py +192 -2
  22. crawlo/pipelines/pipeline_manager.py +3 -3
  23. crawlo/settings/default_settings.py +51 -1
  24. crawlo/spider/__init__.py +2 -2
  25. crawlo/subscriber.py +90 -11
  26. crawlo/utils/concurrency_manager.py +125 -0
  27. crawlo/utils/date_tools.py +165 -8
  28. crawlo/utils/func_tools.py +74 -14
  29. crawlo/utils/pqueue.py +166 -8
  30. crawlo/utils/project.py +3 -2
  31. crawlo/utils/request.py +85 -0
  32. crawlo/utils/tools.py +303 -0
  33. crawlo/utils/url.py +40 -0
  34. {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/METADATA +23 -11
  35. crawlo-1.0.2.dist-info/RECORD +68 -0
  36. crawlo-1.0.0.dist-info/RECORD +0 -59
  37. crawlo-1.0.0.dist-info/licenses/LICENSE +0 -23
  38. {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/WHEEL +0 -0
  39. {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/entry_points.txt +0 -0
  40. {crawlo-1.0.0.dist-info → crawlo-1.0.2.dist-info}/top_level.txt +0 -0
crawlo/pipelines/mysql_batch_pipline.py ADDED
@@ -0,0 +1,134 @@
+ import asyncio
+ import logging
+ from crawlo.exceptions import NotConfiguredError
+
+ logger = logging.getLogger(__name__)
+
+
+ class AsyncmyMySQLPipeline:
+     """
+     Asynchronous MySQL pipeline that writes scraped items to MySQL in batches.
+     """
+
+     def __init__(self, host, port, user, password, db, batch_size, table_name):
+         self.host = host
+         self.port = port
+         self.user = user
+         self.password = password
+         self.db = db
+         self.batch_size = batch_size
+         self.table_name = table_name
+         self.buffer = []
+         self.pool = None
+         self.flush_task = None
+         self.insert_stmt = None
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         """Read the database connection parameters from the crawler settings."""
+         batch_size = crawler.settings.get_int('MYSQL_BATCH_SIZE', 100)
+
+         return cls(
+             host=crawler.settings.get('MYSQL_HOST'),
+             port=crawler.settings.get('MYSQL_PORT'),
+             user=crawler.settings.get('MYSQL_USER'),
+             password=crawler.settings.get('MYSQL_PASSWORD'),
+             db=crawler.settings.get('MYSQL_DB'),
+             batch_size=batch_size,
+             table_name=crawler.settings.get('MYSQL_TABLE'),
+         )
+
+     async def open_spider(self, spider):
+         """Initialise the database connection pool when the spider starts."""
+         try:
+             import asyncmy
+             self.pool = await asyncmy.create_pool(
+                 host=self.host,
+                 port=self.port,
+                 user=self.user,
+                 password=self.password,
+                 db=self.db,
+                 autocommit=True,
+                 charset='utf8mb4',
+                 cursorclass=asyncmy.cursors.DictCursor
+             )
+             logger.info(f"MySQL connection pool created, host: {self.host}, database: {self.db}")
+
+             # Start the periodic auto-flush task
+             self.flush_task = asyncio.create_task(self._auto_flush())
+
+         except Exception as e:
+             logger.error(f"Failed to create MySQL connection pool: {e}")
+             raise NotConfiguredError(f"MySQL connection failed: {e}")
+
+     async def process_item(self, item, spider):
+         """Add each scraped item to the buffer."""
+         self.buffer.append(dict(item))
+
+         # Flush automatically once the buffer reaches the batch size
+         if len(self.buffer) >= self.batch_size:
+             await self._flush_buffer()
+
+         return item
+
+     async def _flush_buffer(self):
+         """Write the buffered items to the database in a single batch."""
+         if not self.buffer:
+             return
+
+         try:
+             async with self.pool.acquire() as conn:
+                 async with conn.cursor() as cursor:
+                     # Build the INSERT statement lazily from the keys of the first buffered item
+                     if not self.insert_stmt:
+                         columns = ', '.join(self.buffer[0].keys())
+                         placeholders = ', '.join(['%s'] * len(self.buffer[0]))
+                         self.insert_stmt = f"INSERT INTO {self.table_name} ({columns}) VALUES ({placeholders})"
+
+                     # Prepare the rows for the batch insert
+                     values = [tuple(item.values()) for item in self.buffer]
+
+                     # Run the batch insert
+                     await cursor.executemany(self.insert_stmt, values)
+                     logger.debug(f"Batch-inserted {len(values)} rows into {self.table_name}")
+
+             # Clear the buffer
+             self.buffer.clear()
+
+         except Exception as e:
+             logger.error(f"Batch insert failed: {e}")
+             # Keep the buffered data on failure to avoid losing it;
+             # a production deployment may need a more elaborate error-handling strategy
+
+     async def _auto_flush(self):
+         """Flush the buffer periodically so items do not sit in memory for too long."""
+         try:
+             while True:
+                 await asyncio.sleep(10)  # check every 10 seconds
+                 if self.buffer:
+                     await self._flush_buffer()
+         except asyncio.CancelledError:
+             logger.info("Auto-flush task cancelled")
+
+     async def spider_closed(self, spider):
+         """Clean up when the spider closes."""
+         try:
+             # Cancel the auto-flush task
+             if self.flush_task:
+                 self.flush_task.cancel()
+                 await self.flush_task  # wait for the cancellation to finish
+
+             # Make sure any items still in the buffer are written to the database
+             if self.buffer:
+                 await self._flush_buffer()
+
+         except asyncio.CancelledError:
+             logger.info("Auto-flush task was cancelled while the spider was closing")
+         except Exception as e:
+             logger.error(f"Error while closing the spider: {e}")
+         finally:
+             # Close the connection pool
+             if self.pool:
+                 self.pool.close()
+                 await self.pool.wait_closed()
+                 logger.info("MySQL connection pool closed")
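
The batch pipeline above buffers items and flushes them with executemany() once MYSQL_BATCH_SIZE items have accumulated, or every 10 seconds. A minimal sketch of project settings that would enable it, assuming the setting names read in from_crawler above; the connection values are placeholders:

    # Hypothetical project settings for the batch pipeline (names taken from from_crawler above)
    PIPELINES = [
        'crawlo.pipelines.mysql_batch_pipline.AsyncmyMySQLPipeline',  # note: module name is spelled "pipline" in this release
    ]
    MYSQL_HOST = '127.0.0.1'       # placeholder connection details
    MYSQL_PORT = 3306
    MYSQL_USER = 'scrapy_user'
    MYSQL_PASSWORD = 'your_password'
    MYSQL_DB = 'scrapy_data'
    MYSQL_TABLE = 'crawled_data'
    MYSQL_BATCH_SIZE = 100         # rows buffered before each executemany() flush
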
crawlo/pipelines/mysql_pipeline.py CHANGED
@@ -1,5 +1,195 @@
  # -*- coding: utf-8 -*-
+ import asyncio
+ import aiomysql
+ from typing import Optional
+ from asyncmy import create_pool
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import ItemDiscard
+ from crawlo.utils.tools import make_insert_sql, logger


- class MySQLPipeline(object):
-     pass
+ class AsyncmyMySQLPipeline:
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # An asyncio lock plus an initialisation flag keep pool creation safe under concurrency
+         self._pool_lock = asyncio.Lock()
+         self._pool_initialized = False
+         self.pool = None
+         self.table_name = (
+             self.settings.get('MYSQL_TABLE') or
+             getattr(crawler.spider, 'mysql_table', None) or
+             f"{crawler.spider.name}_items"
+         )
+
+         # Register the shutdown hook
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         return cls(crawler)
+
+     async def _ensure_pool(self):
+         """Make sure the connection pool is initialised (safe for concurrent callers)."""
+         if self._pool_initialized:
+             return
+
+         async with self._pool_lock:
+             if not self._pool_initialized:  # double-checked to avoid a race condition
+                 try:
+                     self.pool = await create_pool(
+                         host=self.settings.get('MYSQL_HOST', 'localhost'),
+                         port=self.settings.get_int('MYSQL_PORT', 3306),
+                         user=self.settings.get('MYSQL_USER', 'root'),
+                         password=self.settings.get('MYSQL_PASSWORD', ''),
+                         db=self.settings.get('MYSQL_DB', 'scrapy_db'),
+                         minsize=self.settings.get_int('MYSQL_POOL_MIN', 3),
+                         maxsize=self.settings.get_int('MYSQL_POOL_MAX', 10),
+                         echo=self.settings.get_bool('MYSQL_ECHO', False)
+                     )
+                     self._pool_initialized = True
+                     self.logger.debug(f"MySQL connection pool initialised (table: {self.table_name})")
+                 except Exception as e:
+                     self.logger.error(f"Failed to initialise the MySQL connection pool: {e}")
+                     raise
+
+     async def process_item(self, item, spider, kwargs=None) -> Optional[dict]:
+         """Core item-processing method."""
+         kwargs = kwargs or {}
+         spider_name = getattr(spider, 'name', 'unknown')  # spider name, used for logging
+         try:
+             await self._ensure_pool()
+             item_dict = dict(item)
+             sql = make_insert_sql(table=self.table_name, data=item_dict, **kwargs)
+
+             rowcount = await self._execute_sql(sql=sql)
+             if rowcount > 1:
+                 self.logger.info(
+                     f"Spider {spider_name} inserted {rowcount} rows into table {self.table_name}"
+                 )
+             elif rowcount == 1:
+                 self.logger.debug(
+                     f"Spider {spider_name} inserted one row into table {self.table_name}"
+                 )
+             else:
+                 self.logger.warning(
+                     f"Spider {spider_name}: SQL executed but no new rows were inserted - {sql[:100]}..."
+                 )
+
+             return item
+
+         except Exception as e:
+             self.logger.error(f"Error while processing item: {e}")
+             raise ItemDiscard(f"Processing failed: {e}")
+
+     async def _execute_sql(self, sql: str, values: list = None) -> int:
+         """Execute an SQL statement and handle the result."""
+         async with self.pool.acquire() as conn:
+             async with conn.cursor() as cursor:
+                 try:
+                     # Choose the call depending on whether parameter values were supplied
+                     if values is not None:
+                         rowcount = await cursor.execute(sql, values)
+                     else:
+                         rowcount = await cursor.execute(sql)
+
+                     await conn.commit()
+                     self.crawler.stats.inc_value('mysql/insert_success')
+                     return rowcount
+                 except Exception as e:
+                     await conn.rollback()
+                     self.crawler.stats.inc_value('mysql/insert_failed')
+                     raise ItemDiscard(f"MySQL insert failed: {e}")
+
+     async def spider_closed(self):
+         """Release resources when the spider closes."""
+         if self.pool:
+             self.pool.close()
+             await self.pool.wait_closed()
+             self.logger.info("MySQL connection pool closed")
+
+
+ class AiomysqlMySQLPipeline:
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.settings = crawler.settings
+         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
+
+         # Asyncio lock plus initialisation flag
+         self._pool_lock = asyncio.Lock()
+         self._pool_initialized = False
+         self.pool = None
+         self.table_name = (
+             self.settings.get('MYSQL_TABLE') or
+             getattr(crawler.spider, 'mysql_table', None) or
+             f"{crawler.spider.name}_items"
+         )
+
+         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
+
+     @classmethod
+     def create_instance(cls, crawler):
+         return cls(crawler)
+
+     async def _init_pool(self):
+         """Lazily initialise the connection pool (safe for concurrent callers)."""
+         if self._pool_initialized:
+             return
+
+         async with self._pool_lock:
+             if not self._pool_initialized:
+                 try:
+                     self.pool = await aiomysql.create_pool(
+                         host=self.settings.get('MYSQL_HOST', 'localhost'),
+                         port=self.settings.getint('MYSQL_PORT', 3306),
+                         user=self.settings.get('MYSQL_USER', 'root'),
+                         password=self.settings.get('MYSQL_PASSWORD', ''),
+                         db=self.settings.get('MYSQL_DB', 'scrapy_db'),
+                         minsize=self.settings.getint('MYSQL_POOL_MIN', 2),
+                         maxsize=self.settings.getint('MYSQL_POOL_MAX', 5),
+                         cursorclass=aiomysql.DictCursor,
+                         autocommit=False
+                     )
+                     self._pool_initialized = True
+                     self.logger.debug(f"aiomysql connection pool initialised (table: {self.table_name})")
+                 except Exception as e:
+                     self.logger.error(f"Failed to initialise the aiomysql connection pool: {e}")
+                     raise
+
+     async def process_item(self, item, spider) -> Optional[dict]:
+         """Item-processing method."""
+         try:
+             await self._init_pool()
+
+             item_dict = dict(item)
+             sql = f"""
+                 INSERT INTO `{self.table_name}`
+                 ({', '.join([f'`{k}`' for k in item_dict.keys()])})
+                 VALUES ({', '.join(['%s'] * len(item_dict))})
+             """
+
+             async with self.pool.acquire() as conn:
+                 async with conn.cursor() as cursor:
+                     try:
+                         await cursor.execute(sql, list(item_dict.values()))
+                         await conn.commit()
+                         self.crawler.stats.inc_value('mysql/insert_success')
+                     except aiomysql.Error as e:
+                         await conn.rollback()
+                         self.crawler.stats.inc_value('mysql/insert_failed')
+                         raise ItemDiscard(f"MySQL error: {e.args[1]}")
+
+             return item
+
+         except Exception as e:
+             self.logger.error(f"Pipeline processing error: {e}")
+             raise ItemDiscard(f"Processing failed: {e}")
+
+     async def spider_closed(self):
+         """Release resources."""
+         if self.pool:
+             self.pool.close()
+             await self.pool.wait_closed()
+             self.logger.info("aiomysql connection pool released")
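
For reference, the inline f-string in AiomysqlMySQLPipeline.process_item builds a parameterised INSERT whose placeholders match the item's keys. A small standalone sketch, with a hypothetical item and table name, of what that construction yields:

    item_dict = {'title': 'Hello', 'url': 'https://example.com'}   # hypothetical scraped item
    table_name = 'crawled_data'                                     # hypothetical table
    sql = (
        f"INSERT INTO `{table_name}` "
        f"({', '.join(f'`{k}`' for k in item_dict)}) "
        f"VALUES ({', '.join(['%s'] * len(item_dict))})"
    )
    print(sql)
    # INSERT INTO `crawled_data` (`title`, `url`) VALUES (%s, %s)
    # The values are passed separately: cursor.execute(sql, list(item_dict.values()))
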
crawlo/pipelines/pipeline_manager.py CHANGED
@@ -24,18 +24,18 @@ class PipelineManager:
          self._add_methods()

      @classmethod
-     def create_instance(cls, *args, **kwargs):
+     def from_crawler(cls, *args, **kwargs):
          o = cls(*args, **kwargs)
          return o

      def _add_pipelines(self, pipelines):
          for pipeline in pipelines:
              pipeline_cls = load_class(pipeline)
-             if not hasattr(pipeline_cls, 'create_instance'):
+             if not hasattr(pipeline_cls, 'from_crawler'):
                  raise PipelineInitError(
                      f"Pipeline init failed, must inherit from `BasePipeline` or have a `create_instance` method"
                  )
-             self.pipelines.append(pipeline_cls.create_instance(self.crawler))
+             self.pipelines.append(pipeline_cls.from_crawler(self.crawler))
          if pipelines:
              self.logger.info(f"enabled pipelines: \n {pformat(pipelines)}")
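
With this change PipelineManager instantiates pipelines through from_crawler() instead of create_instance() (the error message still mentions create_instance). A minimal sketch of a custom pipeline that satisfies the new contract, assuming the async process_item interface used by the built-in pipelines above:

    class MyCustomPipeline:
        def __init__(self, crawler):
            self.crawler = crawler

        @classmethod
        def from_crawler(cls, crawler):
            # PipelineManager now looks for this method before instantiation
            return cls(crawler)

        async def process_item(self, item, spider):
            # transform or persist the item, then pass it along
            return item
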
 
crawlo/settings/default_settings.py CHANGED
@@ -1,8 +1,10 @@
  #!/usr/bin/python
  # -*- coding:UTF-8 -*-
+ # Default project name
+ import os

+ PROJECT_NAME = 'crawlo'
  VERSION = 1.0
-
  # Concurrency
  CONCURRENCY = 8

@@ -25,6 +27,10 @@ IGNORE_HTTP_CODES = [403, 404]
  RETRY_HTTP_CODES = [408, 429, 500, 502, 503, 504, 522, 524]
  # Status codes that are allowed through
  ALLOWED_CODES = []
+ # Request priority settings
+ RETRY_PRIORITY = -1
+ #
+ DEPTH_PRIORITY = 1

  STATS_DUMP = True
  # SSL verification
@@ -37,3 +43,47 @@ LOG_LEVEL = 'DEBUG'
  DOWNLOADER = "crawlo.downloader.aiohttp_downloader.AioHttpDownloader"  # HttpXDownloader

  EXTENSIONS = []
+
+ # --------------------------------------------------- Shared MySQL settings ---------------------------------------------------
+ MYSQL_HOST = '127.0.0.1'
+ MYSQL_PORT = 3306
+ MYSQL_USER = 'scrapy_user'
+ MYSQL_PASSWORD = 'your_password'
+ MYSQL_DB = 'scrapy_data'
+ MYSQL_TABLE = 'crawled_data'
+
+ # asyncmy-specific settings
+ MYSQL_POOL_MIN = 5   # minimum number of pooled connections
+ MYSQL_POOL_MAX = 20  # maximum number of pooled connections
+ MYSQL_ECHO = False
+
+ # Batch insert size
+ MYSQL_BATCH_SIZE = 100
+
+ # --------------------------------------------------- MongoDB base settings ---------------------------------------------------
+ MONGO_URI = 'mongodb://user:password@host:27017'
+ MONGO_DATABASE = 'scrapy_data'
+ MONGO_COLLECTION = 'crawled_items'  # optional, defaults to the spider name
+
+ # Connection-pool tuning (only needed for option two)
+ MONGO_MAX_POOL_SIZE = 200  # maximum number of connections
+ MONGO_MIN_POOL_SIZE = 20   # minimum number of connections kept open
+
+ # Enabled pipelines
+ PIPELINES = [
+     'crawlo.pipelines.console_pipeline.ConsolePipeline',
+ ]
+
+ # filter
+ REQUEST_DIR = '.'
+ FILTER_DEBUG = True
+ FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
+
+ # redis filter
+ CLEANUP_FP = False
+ DECODE_RESPONSES = True
+ REDIS_KEY = 'request_fingerprint'
+ REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
+ REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', 'oscar&0503')
+ REDIS_PORT = os.getenv('REDIS_PORT', 6379)
+ REDIS_URL = f'redis://:{REDIS_PASSWORD or ""}@{REDIS_HOST}:{REDIS_PORT}/0'
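
These defaults can be overridden per project; the filter-related keys (FILTER_CLASS, REDIS_URL and friends) feed the new crawlo.filters package added in this release. A sketch of a project-level settings module overriding a few of them, assuming only the setting names shown above; the values are placeholders:

    import os

    CONCURRENCY = 16                     # instead of the default 8
    LOG_LEVEL = 'INFO'
    FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'   # the default in-memory duplicate filter
    PIPELINES = [
        'crawlo.pipelines.console_pipeline.ConsolePipeline',
        'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',
    ]
    # Keep secrets out of the settings file
    REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
    REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '')
    MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', '')
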
crawlo/spider/__init__.py CHANGED
@@ -18,10 +18,10 @@ class Spider(object):
      def start_requests(self):
          if self.start_urls:
              for url in self.start_urls:
-                 yield Request(url=url)
+                 yield Request(url=url, dont_filter=True)
          else:
              if hasattr(self, 'start_url') and isinstance(getattr(self, 'start_url'), str):
-                 yield Request(getattr(self, 'start_url'))
+                 yield Request(getattr(self, 'start_url'), dont_filter=True)

      def parse(self, response):
          raise NotImplementedError
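
start_requests() now marks seed requests with dont_filter=True, so they bypass the duplicate filter configured via FILTER_CLASS while follow-up requests are still deduplicated. A minimal spider sketch, assuming Request is importable from crawlo.network.request (the module shown in the file list):

    from crawlo.spider import Spider
    from crawlo.network.request import Request   # import path assumed from the file list

    class ExampleSpider(Spider):
        name = 'example'
        start_urls = ['https://example.com/']     # seeds are yielded with dont_filter=True

        def parse(self, response):
            # follow-up requests go through the duplicate filter unless dont_filter=True is set
            yield Request(url='https://example.com/page/2')
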
crawlo/subscriber.py CHANGED
@@ -3,25 +3,104 @@
  import asyncio
  from collections import defaultdict
  from inspect import iscoroutinefunction
- from typing import Dict, Set, Callable, Coroutine
+ from typing import Dict, Set, Callable, Coroutine, Any, TypeAlias, List

- from crawlo.exceptions import ReceiverTypeError
+
+ class ReceiverTypeError(TypeError):
+     """Raised when a subscribed receiver is not a coroutine function."""
+     pass
+
+
+ ReceiverCoroutine: TypeAlias = Callable[..., Coroutine[Any, Any, Any]]


  class Subscriber:
+     """
+     A publish/subscribe (Pub/Sub) implementation with async-coroutine support.
+
+     The class lets you register (subscribe) coroutine functions that listen for a given
+     event and, when the event fires, notifies all subscribers asynchronously and concurrently.
+     """

      def __init__(self):
-         self._subscribers: Dict[str, Set[Callable[..., Coroutine]]] = defaultdict(set)
+         """Initialise an empty subscriber mapping."""
+         self._subscribers: Dict[str, Set[ReceiverCoroutine]] = defaultdict(set)

-     def subscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
+     def subscribe(self, receiver: ReceiverCoroutine, *, event: str) -> None:
+         """
+         Subscribe to an event.
+
+         Args:
+             receiver: A coroutine function (e.g. async def my_func(...)).
+             event: The name of the event to subscribe to.
+
+         Raises:
+             ReceiverTypeError: If the given `receiver` is not a coroutine function.
+         """
          if not iscoroutinefunction(receiver):
-             raise ReceiverTypeError(f"{receiver.__qualname__} must be a coroutine function")
+             raise ReceiverTypeError(f"Receiver '{receiver.__qualname__}' must be a coroutine function.")
          self._subscribers[event].add(receiver)

-     def unsubscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
-         self._subscribers[event].discard(receiver)
+     def unsubscribe(self, receiver: ReceiverCoroutine, *, event: str) -> None:
+         """
+         Unsubscribe from an event.
+
+         Unknown events or receivers are ignored silently.
+
+         Args:
+             receiver: The coroutine function to remove.
+             event: The event name.
+         """
+         if event in self._subscribers:
+             self._subscribers[event].discard(receiver)
+
+     async def notify(self, event: str, *args, **kwargs) -> List[Any]:
+         """
+         Asynchronously and concurrently notify every receiver subscribed to the event.
+
+         The method waits for all subscriber tasks to finish and collects their results or exceptions.
+
+         Args:
+             event: The name of the event to fire.
+             *args: Positional arguments passed to the receivers.
+             **kwargs: Keyword arguments passed to the receivers.
+
+         Returns:
+             A list containing each subscriber task's return value, or the exception raised while it ran.
+         """
+         receivers = self._subscribers.get(event, set())
+         if not receivers:
+             return []
+
+         tasks = [asyncio.create_task(receiver(*args, **kwargs)) for receiver in receivers]
+
+         # Run all tasks concurrently and return their results (including exceptions)
+         return await asyncio.gather(*tasks, return_exceptions=True)

-     async def notify(self, event: str, *args, **kwargs) -> None:
-         for receiver in self._subscribers[event]:
-             # cannot await here
-             asyncio.create_task(receiver(*args, **kwargs))
+ # #!/usr/bin/python
+ # # -*- coding:UTF-8 -*-
+ # import asyncio
+ # from collections import defaultdict
+ # from inspect import iscoroutinefunction
+ # from typing import Dict, Set, Callable, Coroutine
+ #
+ # from crawlo.exceptions import ReceiverTypeError
+ #
+ #
+ # class Subscriber:
+ #
+ #     def __init__(self):
+ #         self._subscribers: Dict[str, Set[Callable[..., Coroutine]]] = defaultdict(set)
+ #
+ #     def subscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
+ #         if not iscoroutinefunction(receiver):
+ #             raise ReceiverTypeError(f"{receiver.__qualname__} must be a coroutine function")
+ #         self._subscribers[event].add(receiver)
+ #
+ #     def unsubscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
+ #         self._subscribers[event].discard(receiver)
+ #
+ #     async def notify(self, event: str, *args, **kwargs) -> None:
+ #         for receiver in self._subscribers[event]:
+ #             # cannot await here
+ #             asyncio.create_task(receiver(*args, **kwargs))
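
notify() is now awaitable and returns the results (or exceptions) of all receivers via asyncio.gather, instead of fire-and-forget task creation. A small usage sketch, assuming the class is imported from crawlo.subscriber (the module path in the file list):

    import asyncio
    from crawlo.subscriber import Subscriber

    async def on_spider_closed(reason):
        print(f"spider closed: {reason}")
        return reason

    async def main():
        bus = Subscriber()
        bus.subscribe(on_spider_closed, event='spider_closed')
        results = await bus.notify('spider_closed', 'finished')
        print(results)   # return values or exceptions collected from every receiver

    asyncio.run(main())
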
crawlo/utils/concurrency_manager.py ADDED
@@ -0,0 +1,125 @@
+ import os
+ import platform
+ import logging
+ from typing import Optional
+
+ try:
+     import psutil  # third-party library used to read system resource information
+ except ImportError:
+     psutil = None  # fall back to None when psutil is unavailable
+
+ logger = logging.getLogger(__name__)
+
+
+ def calculate_optimal_concurrency(user_specified: Optional[int] = None, use_logical_cores: bool = True) -> int:
+     """
+     Compute an optimal concurrency level from system resources, or use the user-specified value.
+
+     Args:
+         user_specified: Concurrency chosen by the user (takes precedence).
+         use_logical_cores: Whether to count logical CPU cores (hyper-threading). Defaults to True.
+
+     Returns:
+         The computed optimal concurrency.
+
+     Notes:
+         1. A user-specified concurrency always wins.
+         2. The calculation strategy depends on the operating system:
+            - Windows: conservative, to avoid memory pressure
+            - macOS: balanced resource usage
+            - Linux: makes full use of server resources
+            - other systems: a sensible default
+         3. Both available memory and the CPU core count feed into the calculation.
+         4. A fallback path is provided when psutil is unavailable.
+     """
+     # A user-specified concurrency takes precedence
+     if user_specified is not None:
+         logger.info(f"Using user-specified concurrency: {user_specified}")
+         return user_specified
+
+     try:
+         current_os = platform.system()  # detect the operating system
+         logger.debug(f"Detected operating system: {current_os}")
+
+         # CPU core count (logical or physical, depending on the flag)
+         cpu_count = psutil.cpu_count(logical=use_logical_cores) or 1 if psutil else os.cpu_count() or 1
+
+         # Pick the calculation strategy for the current operating system
+         if current_os == "Windows":
+             concurrency = _get_concurrency_for_windows(cpu_count, use_logical_cores)
+         elif current_os == "Darwin":  # macOS
+             concurrency = _get_concurrency_for_macos(cpu_count, use_logical_cores)
+         elif current_os == "Linux":
+             concurrency = _get_concurrency_for_linux(cpu_count, use_logical_cores)
+         else:  # any other operating system
+             concurrency = _get_concurrency_default(cpu_count)
+
+         logger.info(f"Computed maximum concurrency: {concurrency}")
+         return concurrency
+
+     except Exception as e:
+         logger.warning(f"Dynamic concurrency calculation failed: {str(e)}, falling back to 50")
+         return 50  # safe default when the calculation fails
+
+
+ def _get_concurrency_for_windows(cpu_count: int, use_logical_cores: bool) -> int:
+     """Concurrency calculation for Windows."""
+     if psutil:
+         # Available memory in GB
+         available_memory = psutil.virtual_memory().available / (1024 ** 3)
+         # Memory bound: 10 concurrent tasks per 4 GB of available memory
+         mem_based = int((available_memory / 4) * 10)
+         # CPU bound: a larger multiplier when logical cores are counted
+         cpu_based = cpu_count * (5 if use_logical_cores else 3)
+         # Clamp to 5-100, taking the smaller of the memory and CPU bounds
+         return max(5, min(100, mem_based, cpu_based))
+     else:
+         # Fallback when psutil is unavailable
+         return min(50, cpu_count * 5)
+
+
+ def _get_concurrency_for_macos(cpu_count: int, use_logical_cores: bool) -> int:
+     """Concurrency calculation for macOS."""
+     if psutil:
+         available_memory = psutil.virtual_memory().available / (1024 ** 3)
+         # Memory bound: 10 concurrent tasks per 3 GB of available memory
+         mem_based = int((available_memory / 3) * 10)
+         # CPU bound: a larger multiplier when logical cores are counted
+         cpu_based = cpu_count * (6 if use_logical_cores else 4)
+         # Clamp to 5-120
+         return max(5, min(120, mem_based, cpu_based))
+     else:
+         try:
+             # macOS fallback: read the physical core count via sysctl
+             import subprocess
+             output = subprocess.check_output(["sysctl", "hw.physicalcpu"])
+             cpu_count = int(output.split()[1])
+             return min(60, cpu_count * 5)
+         except:
+             return 40  # sensible default for Macs
+
+
+ def _get_concurrency_for_linux(cpu_count: int, use_logical_cores: bool) -> int:
+     """Concurrency calculation for Linux (more aggressive)."""
+     if psutil:
+         available_memory = psutil.virtual_memory().available / (1024 ** 3)
+         # Memory bound: 10 concurrent tasks per 1.5 GB of available memory
+         mem_based = int((available_memory / 1.5) * 10)
+         # CPU bound: a larger multiplier for server environments
+         cpu_based = cpu_count * (8 if use_logical_cores else 5)
+         # Clamp to 5-200
+         return max(5, min(200, mem_based, cpu_based))
+     else:
+         try:
+             # Linux fallback: parse /proc/cpuinfo
+             with open("/proc/cpuinfo") as f:
+                 cpu_count = f.read().count("processor\t:")
+             if cpu_count > 0:
+                 return min(200, cpu_count * 8)
+         except:
+             return 50  # sensible default for Linux servers
+
+
+ def _get_concurrency_default(cpu_count: int) -> int:
+     """Default calculation for unknown operating systems."""
+     return min(50, cpu_count * 5)  # conservative default
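
A short usage sketch of the new helper: a user-supplied value always wins, otherwise the result depends on the host OS, the CPU count and, if psutil is installed, the available memory:

    from crawlo.utils.concurrency_manager import calculate_optimal_concurrency

    # Derive a value from the current machine (clamped per OS, e.g. 5-200 on Linux)
    print(calculate_optimal_concurrency())

    # Or pin it explicitly; this bypasses the resource-based calculation entirely
    print(calculate_optimal_concurrency(user_specified=16))
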