crawlo 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__version__.py +1 -1
- crawlo/crawler.py +133 -18
- crawlo/middleware/retry.py +2 -1
- crawlo/network/request.py +194 -115
- crawlo/network/response.py +122 -53
- crawlo/pipelines/mysql_pipeline.py +38 -19
- crawlo/subscriber.py +90 -11
- crawlo/utils/concurrency_manager.py +125 -0
- crawlo/utils/tools.py +303 -0
- {crawlo-1.0.1.dist-info → crawlo-1.0.2.dist-info}/METADATA +23 -11
- {crawlo-1.0.1.dist-info → crawlo-1.0.2.dist-info}/RECORD +14 -13
- crawlo-1.0.1.dist-info/licenses/LICENSE +0 -23
- {crawlo-1.0.1.dist-info → crawlo-1.0.2.dist-info}/WHEEL +0 -0
- {crawlo-1.0.1.dist-info → crawlo-1.0.2.dist-info}/entry_points.txt +0 -0
- {crawlo-1.0.1.dist-info → crawlo-1.0.2.dist-info}/top_level.txt +0 -0
crawlo/network/response.py
CHANGED
@@ -2,8 +2,8 @@
 # -*- coding:UTF-8 -*-
 import re
 import ujson
-from typing import Dict
-from parsel import Selector
+from typing import Dict, Any, List, Optional
+from parsel import Selector, SelectorList
 from http.cookies import SimpleCookie
 from urllib.parse import urljoin as _urljoin
 
@@ -11,17 +11,20 @@ from crawlo import Request
 from crawlo.exceptions import DecodeError
 
 
-class Response(object):
+class Response:
+    """
+    Wraps an HTTP response and provides convenience methods for parsing its data.
+    """
 
     def __init__(
-        …
+        self,
+        url: str,
+        *,
+        headers: Dict[str, Any],
+        body: bytes = b"",
+        method: str = 'GET',
+        request: Request = None,
+        status_code: int = 200,
     ):
         self.url = url
         self.headers = headers
@@ -29,65 +32,131 @@ class Response(object):
         self.method = method
         self.request = request
         self.status_code = status_code
-        self.encoding = request.encoding
-        self._selector = None
+        self.encoding = self.request.encoding if self.request else None
         self._text_cache = None
+        self._selector_instance = None  # renamed so it does not clash with the _selector @property
 
     @property
-    def text(self):
-        …
-        if self._text_cache:
+    def text(self) -> str:
+        """Decode the response body to a string using the correct encoding, caching the result."""
+        if self._text_cache is not None:
             return self._text_cache
+
+        encoding = self.encoding
         try:
-            …
+            # Prefer the encoding supplied by the request
+            if encoding:
+                self._text_cache = self.body.decode(encoding)
+                return self._text_cache
+
+            # Otherwise extract the charset from the Content-Type header
+            content_type = self.headers.get("Content-Type", "")
+            charset_match = re.search(r"charset=([\w-]+)", content_type, re.I)
+            if charset_match:
+                encoding = charset_match.group(1)
+                self._text_cache = self.body.decode(encoding)
+                return self._text_cache
+
+            # Fall back to UTF-8
+            self._text_cache = self.body.decode("utf-8")
+            return self._text_cache
+
+        except UnicodeDecodeError as e:
+            raise DecodeError(f"Failed to decode response from {self.url}: {e}")
+
+    def json(self) -> Any:
+        """Parse the response text as JSON."""
         return ujson.loads(self.text)
 
-    def urljoin(self, url):
+    def urljoin(self, url: str) -> str:
+        """Join a URL against the response URL, resolving relative paths."""
         return _urljoin(self.url, url)
 
-    …
+    @property
+    def _selector(self) -> Selector:
+        """Lazily build the parsel Selector instance."""
+        if self._selector_instance is None:
+            self._selector_instance = Selector(self.text)
+        return self._selector_instance
+
+    def xpath(self, query: str) -> SelectorList:
+        """Query the document with an XPath selector."""
+        return self._selector.xpath(query)
+
+    def css(self, query: str) -> SelectorList:
+        """Query the document with a CSS selector."""
+        return self._selector.css(query)
+
+    def xpath_text(self, query: str) -> str:
+        """Extract plain text via XPath."""
+        fragments = self.xpath(f"{query}//text()").getall()
+        return " ".join(text.strip() for text in fragments if text.strip())
+
+    def css_text(self, query: str) -> str:
+        """Extract plain text via a CSS selector."""
+        fragments = self.css(f"{query} ::text").getall()
+        return " ".join(text.strip() for text in fragments if text.strip())
+
+    def get_text(self, xpath_or_css: str, join_str: str = " ") -> str:
+        """
+        Get the plain text of a node (child-node text is joined automatically).
+
+        Args:
+            xpath_or_css: an XPath or CSS selector
+            join_str: separator used when joining text fragments (default: a space)
 
+        Returns:
+            The joined plain-text string.
+        """
+        elements = self.xpath(xpath_or_css) if xpath_or_css.startswith(('/', '//', './')) else self.css(xpath_or_css)
+        texts = elements.xpath('.//text()').getall()
+        return join_str.join(text.strip() for text in texts if text.strip())
+
+    def get_all_text(self, xpath_or_css: str, join_str: str = " ") -> List[str]:
+        """
+        Get the plain text of several nodes as a list.
+
+        Args:
+            xpath_or_css: an XPath or CSS selector
+            join_str: separator used to join the text fragments within a single node
+
+        Returns:
+            A list of plain-text strings, one entry per node.
+        """
+        elements = self.xpath(xpath_or_css) if xpath_or_css.startswith(('/', '//', './')) else self.css(xpath_or_css)
+        result = []
+        for element in elements:
+            texts = element.xpath('.//text()').getall()
+            clean_text = join_str.join(text.strip() for text in texts if text.strip())
+            if clean_text:
+                result.append(clean_text)
+        return result
 
-    def re_search(self, pattern, flags=re.DOTALL):
+    def re_search(self, pattern: str, flags: int = re.DOTALL) -> Optional[re.Match]:
+        """Run a regular-expression search over the response text."""
+        if not isinstance(pattern, str):
+            raise TypeError("Pattern must be a string")
         return re.search(pattern, self.text, flags=flags)
 
-    def re_findall(self, pattern, flags=re.DOTALL):
+    def re_findall(self, pattern: str, flags: int = re.DOTALL) -> List[Any]:
+        """Run re.findall over the response text."""
+        if not isinstance(pattern, str):
+            raise TypeError("Pattern must be a string")
         return re.findall(pattern, self.text, flags=flags)
 
-    def get_cookies(self):
-        …
+    def get_cookies(self) -> Dict[str, str]:
+        """Parse and return the cookies from the response headers."""
+        cookie_header = self.headers.get("Set-Cookie", "")
+        if isinstance(cookie_header, list):
+            cookie_header = ", ".join(cookie_header)
         cookies = SimpleCookie()
-        …
-        return {k: v.value for k, v in cookies.items()}
+        cookies.load(cookie_header)
+        return {key: morsel.value for key, morsel in cookies.items()}
 
     @property
-    def meta(self):
-        …
+    def meta(self) -> Dict:
+        """Return the meta dict of the associated Request."""
+        return self.request.meta if self.request else {}
 
     def __str__(self):
-        return f"{self.…
+        return f"<{self.status_code} {self.url}>"
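The rewritten Response is self-contained enough to exercise directly. A minimal usage sketch of the new parsing helpers follows; the URL, headers, and HTML body are made-up sample data, and the optional request argument is omitted.

from crawlo.network.response import Response

html = b"<html><body><ul><li><a href='/a'>First</a></li><li><a href='/b'>Second</a></li></ul></body></html>"
response = Response(
    "https://example.com/list",  # sample URL, not a real endpoint
    headers={"Content-Type": "text/html; charset=utf-8"},
    body=html,
)

# css()/xpath() share one lazily built parsel Selector over response.text
links = response.css("li a::attr(href)").getall()  # ['/a', '/b']
items = response.get_all_text("//li")              # ['First', 'Second']

# Relative links resolve against response.url
print(items, [response.urljoin(href) for href in links])

Because no request is attached here, text falls back to the charset in Content-Type (utf-8 in this sample) before trying UTF-8 as a last resort.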
crawlo/pipelines/mysql_pipeline.py
CHANGED
@@ -5,6 +5,7 @@ from typing import Optional
 from asyncmy import create_pool
 from crawlo.utils.log import get_logger
 from crawlo.exceptions import ItemDiscard
+from crawlo.utils.tools import make_insert_sql, logger
 
 
 class AsyncmyMySQLPipeline:
@@ -54,29 +55,28 @@ class AsyncmyMySQLPipeline:
             self.logger.error(f"MySQL connection pool initialization failed: {e}")
             raise
 
-    async def process_item(self, item, spider) -> Optional[dict]:
+    async def process_item(self, item, spider, kwargs=None) -> Optional[dict]:
         """Core method for processing an item."""
+        kwargs = kwargs or {}
+        spider_name = getattr(spider, 'name', 'unknown')  # resolve the spider name
         try:
             await self._ensure_pool()
-
             item_dict = dict(item)
-            …
-            self.logger.error(f"MySQL insert failed: {e}")
-            raise ItemDiscard(f"MySQL insert failed: {e}")
+            sql = make_insert_sql(table=self.table_name, data=item_dict, **kwargs)
+
+            rowcount = await self._execute_sql(sql=sql)
+            if rowcount > 1:
+                self.logger.info(
+                    f"Spider {spider_name} inserted {rowcount} records into table {self.table_name}"
+                )
+            elif rowcount == 1:
+                self.logger.debug(
+                    f"Spider {spider_name} inserted a single record into table {self.table_name}"
+                )
+            else:
+                self.logger.warning(
+                    f"Spider {spider_name}: SQL executed but no new record was inserted - {sql[:100]}..."
+                )
 
             return item
 
@@ -84,6 +84,25 @@ class AsyncmyMySQLPipeline:
             self.logger.error(f"Error while processing item: {e}")
             raise ItemDiscard(f"Processing failed: {e}")
 
+    async def _execute_sql(self, sql: str, values: list = None) -> int:
+        """Execute a SQL statement and handle the result."""
+        async with self.pool.acquire() as conn:
+            async with conn.cursor() as cursor:
+                try:
+                    # Pick the execution style depending on whether bound values were given
+                    if values is not None:
+                        rowcount = await cursor.execute(sql, values)
+                    else:
+                        rowcount = await cursor.execute(sql)
+
+                    await conn.commit()
+                    self.crawler.stats.inc_value('mysql/insert_success')
+                    return rowcount
+                except Exception as e:
+                    await conn.rollback()
+                    self.crawler.stats.inc_value('mysql/insert_failed')
+                    raise ItemDiscard(f"MySQL insert failed: {e}")
+
     async def spider_closed(self):
         """Clean up resources when the spider closes."""
         if self.pool:
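process_item now delegates SQL generation to make_insert_sql from the new crawlo/utils/tools.py, whose body this diff does not show. The sketch below is only a guess at the general shape of such a helper, for orientation: the function name, the insert_ignore flag, and the naive quoting are all assumptions, and real code should prefer the parameterized _execute_sql(sql, values) path.

# Hypothetical sketch of a make_insert_sql-style helper (not the actual
# implementation from crawlo/utils/tools.py).
def make_insert_sql_sketch(table: str, data: dict, insert_ignore: bool = False) -> str:
    # Render column names and values into a single INSERT statement.
    columns = ", ".join(f"`{key}`" for key in data)
    values = ", ".join(
        # Naive quoting for illustration only; use bound parameters in practice.
        "NULL" if value is None else "'" + str(value).replace("'", "''") + "'"
        for value in data.values()
    )
    verb = "INSERT IGNORE" if insert_ignore else "INSERT"
    return f"{verb} INTO `{table}` ({columns}) VALUES ({values})"

# make_insert_sql_sketch("articles", {"title": "hello", "views": 3})
# -> INSERT INTO `articles` (`title`, `views`) VALUES ('hello', '3')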
crawlo/subscriber.py
CHANGED
@@ -3,25 +3,104 @@
 import asyncio
 from collections import defaultdict
 from inspect import iscoroutinefunction
-from typing import Dict, Set, Callable, Coroutine
+from typing import Dict, Set, Callable, Coroutine, Any, TypeAlias, List
 
-…
+
+class ReceiverTypeError(TypeError):
+    """Raised when a subscribed receiver is not a coroutine function."""
+    pass
+
+
+ReceiverCoroutine: TypeAlias = Callable[..., Coroutine[Any, Any, Any]]
 
 
 class Subscriber:
+    """
+    An asynchronous, coroutine-based publish/subscribe (pub/sub) implementation.
+
+    Coroutine functions can be registered (subscribed) against named events;
+    when an event fires, every subscriber is notified asynchronously and
+    concurrently.
+    """
 
     def __init__(self):
-        …
+        """Initialize an empty subscriber mapping."""
+        self._subscribers: Dict[str, Set[ReceiverCoroutine]] = defaultdict(set)
 
-    def subscribe(self, receiver: …
+    def subscribe(self, receiver: ReceiverCoroutine, *, event: str) -> None:
+        """
+        Subscribe to an event.
+
+        Args:
+            receiver: a coroutine function (e.g. async def my_func(...)).
+            event: the name of the event to subscribe to.
+
+        Raises:
+            ReceiverTypeError: if `receiver` is not a coroutine function.
+        """
         if not iscoroutinefunction(receiver):
-            raise ReceiverTypeError(f"{receiver.__qualname__} …
+            raise ReceiverTypeError(f"Receiver '{receiver.__qualname__}' must be a coroutine function.")
         self._subscribers[event].add(receiver)
 
-    def unsubscribe(self, receiver: …
-        …
+    def unsubscribe(self, receiver: ReceiverCoroutine, *, event: str) -> None:
+        """
+        Unsubscribe from an event.
+
+        Unknown events or receivers are ignored silently.
+
+        Args:
+            receiver: the coroutine function to remove.
+            event: the event name.
+        """
+        if event in self._subscribers:
+            self._subscribers[event].discard(receiver)
+
+    async def notify(self, event: str, *args, **kwargs) -> List[Any]:
+        """
+        Asynchronously and concurrently notify every receiver subscribed to an event.
+
+        This method waits for all subscriber tasks to finish before returning,
+        collecting every result or exception.
+
+        Args:
+            event: the name of the event to fire.
+            *args: positional arguments passed to the receivers.
+            **kwargs: keyword arguments passed to the receivers.
+
+        Returns:
+            A list with each subscriber task's return value, or the exception
+            captured while it ran.
+        """
+        receivers = self._subscribers.get(event, set())
+        if not receivers:
+            return []
+
+        tasks = [asyncio.create_task(receiver(*args, **kwargs)) for receiver in receivers]
+
+        # Run all tasks concurrently and return the list of results (including exceptions)
+        return await asyncio.gather(*tasks, return_exceptions=True)
 
-…
+# #!/usr/bin/python
+# # -*- coding:UTF-8 -*-
+# import asyncio
+# from collections import defaultdict
+# from inspect import iscoroutinefunction
+# from typing import Dict, Set, Callable, Coroutine
+#
+# from crawlo.exceptions import ReceiverTypeError
+#
+#
+# class Subscriber:
+#
+#     def __init__(self):
+#         self._subscribers: Dict[str, Set[Callable[..., Coroutine]]] = defaultdict(set)
+#
+#     def subscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
+#         if not iscoroutinefunction(receiver):
+#             raise ReceiverTypeError(f"{receiver.__qualname__} must be a coroutine function")
+#         self._subscribers[event].add(receiver)
+#
+#     def unsubscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
+#         self._subscribers[event].discard(receiver)
+#
+#     async def notify(self, event: str, *args, **kwargs) -> None:
+#         for receiver in self._subscribers[event]:
+#             # cannot await here
+#             asyncio.create_task(receiver(*args, **kwargs))
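Since notify() now gathers tasks with return_exceptions=True, callers receive both return values and captured exceptions in one list. A self-contained usage sketch (the event name and handlers are invented for illustration):

import asyncio

from crawlo.subscriber import Subscriber

async def log_close(reason: str) -> str:
    return f"logged: {reason}"

async def flaky_handler(reason: str) -> None:
    raise RuntimeError("handler failed")

async def main() -> None:
    bus = Subscriber()
    bus.subscribe(log_close, event="spider_closed")
    bus.subscribe(flaky_handler, event="spider_closed")

    # One result per subscriber: either its return value or the exception it raised.
    results = await bus.notify("spider_closed", "finished")
    for result in results:
        print(type(result).__name__, result)

asyncio.run(main())

Subscribing a plain (non-async) function raises ReceiverTypeError, and notifying an event with no subscribers returns an empty list.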
crawlo/utils/concurrency_manager.py
ADDED
@@ -0,0 +1,125 @@
+import os
+import platform
+import logging
+from typing import Optional
+
+try:
+    import psutil  # third-party library used to query system resources
+except ImportError:
+    psutil = None  # fall back to None when psutil is unavailable
+
+logger = logging.getLogger(__name__)
+
+
+def calculate_optimal_concurrency(user_specified: Optional[int] = None, use_logical_cores: bool = True) -> int:
+    """
+    Compute an optimal concurrency level from system resources, or use a user-specified value.
+
+    Args:
+        user_specified: a user-specified concurrency level (takes precedence)
+        use_logical_cores: whether to count logical CPU cores (hyper-threading); defaults to True
+
+    Returns:
+        The computed optimal concurrency level.
+
+    Notes:
+        1. A user-specified value always wins.
+        2. The calculation strategy depends on the operating system:
+           - Windows: conservative, to avoid memory pressure
+           - macOS: balanced resource usage
+           - Linux: makes full use of server resources
+           - other systems: a reasonable default
+        3. Available memory and the CPU core count both feed into the calculation.
+        4. A fallback path covers the case where psutil is unavailable.
+    """
+    # A user-specified concurrency level takes precedence
+    if user_specified is not None:
+        logger.info(f"Using user-specified concurrency: {user_specified}")
+        return user_specified
+
+    try:
+        current_os = platform.system()  # detect the operating system
+        logger.debug(f"Detected operating system: {current_os}")
+
+        # CPU core count (logical or physical, depending on the flag)
+        cpu_count = psutil.cpu_count(logical=use_logical_cores) or 1 if psutil else os.cpu_count() or 1
+
+        # Pick the calculation strategy for the current OS
+        if current_os == "Windows":
+            concurrency = _get_concurrency_for_windows(cpu_count, use_logical_cores)
+        elif current_os == "Darwin":  # macOS
+            concurrency = _get_concurrency_for_macos(cpu_count, use_logical_cores)
+        elif current_os == "Linux":
+            concurrency = _get_concurrency_for_linux(cpu_count, use_logical_cores)
+        else:  # any other operating system
+            concurrency = _get_concurrency_default(cpu_count)
+
+        logger.info(f"Computed maximum concurrency: {concurrency}")
+        return concurrency
+
+    except Exception as e:
+        logger.warning(f"Dynamic concurrency calculation failed: {str(e)}; falling back to the default of 50")
+        return 50  # safe default when the calculation fails
+
+
+def _get_concurrency_for_windows(cpu_count: int, use_logical_cores: bool) -> int:
+    """Concurrency calculation specific to Windows."""
+    if psutil:
+        # Available memory in GB
+        available_memory = psutil.virtual_memory().available / (1024 ** 3)
+        # Memory-based limit: 10 concurrent tasks per 4 GB of available memory
+        mem_based = int((available_memory / 4) * 10)
+        # CPU-based limit: larger multiplier when counting logical cores
+        cpu_based = cpu_count * (5 if use_logical_cores else 3)
+        # Clamp to 5-100, taking the smaller of the memory and CPU limits
+        return max(5, min(100, mem_based, cpu_based))
+    else:
+        # Fallback when psutil is unavailable
+        return min(50, cpu_count * 5)
+
+
+def _get_concurrency_for_macos(cpu_count: int, use_logical_cores: bool) -> int:
+    """Concurrency calculation specific to macOS."""
+    if psutil:
+        available_memory = psutil.virtual_memory().available / (1024 ** 3)
+        # Memory-based limit: 10 concurrent tasks per 3 GB of available memory
+        mem_based = int((available_memory / 3) * 10)
+        # CPU-based limit: larger multiplier when counting logical cores
+        cpu_based = cpu_count * (6 if use_logical_cores else 4)
+        # Clamp to 5-120
+        return max(5, min(120, mem_based, cpu_based))
+    else:
+        try:
+            # macOS fallback: query the physical core count via sysctl
+            import subprocess
+            output = subprocess.check_output(["sysctl", "hw.physicalcpu"])
+            cpu_count = int(output.split()[1])
+            return min(60, cpu_count * 5)
+        except:
+            return 40  # reasonable default for a Mac
+
+
+def _get_concurrency_for_linux(cpu_count: int, use_logical_cores: bool) -> int:
+    """Concurrency calculation specific to Linux (more aggressive)."""
+    if psutil:
+        available_memory = psutil.virtual_memory().available / (1024 ** 3)
+        # Memory-based limit: 10 concurrent tasks per 1.5 GB of available memory
+        mem_based = int((available_memory / 1.5) * 10)
+        # CPU-based limit: larger multiplier for server environments
+        cpu_based = cpu_count * (8 if use_logical_cores else 5)
+        # Clamp to 5-200
+        return max(5, min(200, mem_based, cpu_based))
+    else:
+        try:
+            # Linux fallback: parse /proc/cpuinfo
+            with open("/proc/cpuinfo") as f:
+                cpu_count = f.read().count("processor\t:")
+            if cpu_count > 0:
+                return min(200, cpu_count * 8)
+        except:
+            return 50  # reasonable default for a Linux server
+
+
+def _get_concurrency_default(cpu_count: int) -> int:
+    """Default calculation for unknown operating systems."""
+    return min(50, cpu_count * 5)  # conservative default