sycommon-python-lib 0.1.55a0__py3-none-any.whl → 0.1.55b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sycommon/database/async_base_db_service.py +36 -0
- sycommon/database/async_database_service.py +96 -0
- sycommon/logging/async_sql_logger.py +65 -0
- sycommon/logging/kafka_log.py +8 -8
- sycommon/middleware/traceid.py +6 -4
- sycommon/services.py +52 -51
- sycommon/synacos/feign.py +1 -1
- sycommon/synacos/feign_client.py +2 -2
- sycommon/tools/snowflake.py +160 -81
- {sycommon_python_lib-0.1.55a0.dist-info → sycommon_python_lib-0.1.55b1.dist-info}/METADATA +9 -9
- {sycommon_python_lib-0.1.55a0.dist-info → sycommon_python_lib-0.1.55b1.dist-info}/RECORD +14 -11
- {sycommon_python_lib-0.1.55a0.dist-info → sycommon_python_lib-0.1.55b1.dist-info}/WHEEL +0 -0
- {sycommon_python_lib-0.1.55a0.dist-info → sycommon_python_lib-0.1.55b1.dist-info}/entry_points.txt +0 -0
- {sycommon_python_lib-0.1.55a0.dist-info → sycommon_python_lib-0.1.55b1.dist-info}/top_level.txt +0 -0
sycommon/database/async_base_db_service.py
ADDED
@@ -0,0 +1,36 @@
+from contextlib import asynccontextmanager
+from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
+from sycommon.config.Config import SingletonMeta
+from sycommon.database.async_database_service import AsyncDatabaseService
+from sycommon.logging.kafka_log import SYLogger
+
+
+class AsyncBaseDBService(metaclass=SingletonMeta):
+    """Base database service class that wraps async session management"""
+
+    def __init__(self):
+        # Get the async engine (assumes DatabaseService.engine() returns an AsyncEngine)
+        self.engine = AsyncDatabaseService.engine()
+
+        # Create the async session factory
+        # class_=AsyncSession is required so the factory produces async sessions
+        self.Session = async_sessionmaker(
+            bind=self.engine,
+            class_=AsyncSession,
+            expire_on_commit=False
+        )
+
+    @asynccontextmanager
+    async def session(self):
+        """
+        Async database session context manager.
+        Handles session creation, commit, rollback and close automatically.
+        """
+        async with self.Session() as session:
+            try:
+                yield session
+                await session.commit()
+            except Exception as e:
+                await session.rollback()
+                SYLogger.error(f"Database operation failed: {str(e)}")
+                raise
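The new service centralizes commit/rollback handling, so callers only write the query logic. A minimal usage sketch (the UserService subclass and the users table are made up for illustration):

from sqlalchemy import text
from sycommon.database.async_base_db_service import AsyncBaseDBService


class UserService(AsyncBaseDBService):  # hypothetical subclass
    async def count_users(self) -> int:
        # commit, rollback and close are handled by the inherited context manager
        async with self.session() as session:
            result = await session.execute(text("SELECT COUNT(*) FROM users"))
            return result.scalar_one()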
sycommon/database/async_database_service.py
ADDED
@@ -0,0 +1,96 @@
+from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
+from sqlalchemy import text
+
+from sycommon.config.Config import SingletonMeta
+from sycommon.config.DatabaseConfig import DatabaseConfig, convert_dict_keys
+from sycommon.logging.kafka_log import SYLogger
+from sycommon.logging.async_sql_logger import AsyncSQLTraceLogger
+from sycommon.synacos.nacos_service import NacosService
+
+
+class AsyncDatabaseService(metaclass=SingletonMeta):
+    _engine = None
+
+    @staticmethod
+    async def setup_database(config: dict, shareConfigKey: str):
+        common = NacosService(config).share_configs.get(shareConfigKey, {})
+        if common and common.get('spring', {}).get('datasource', None):
+            databaseConfig = common.get('spring', {}).get('datasource', None)
+            converted_dict = convert_dict_keys(databaseConfig)
+            db_config = DatabaseConfig.model_validate(converted_dict)
+
+            # Initialize the DatabaseConnector (passing in the config)
+            connector = AsyncDatabaseConnector(db_config)
+
+            # Assign the engine
+            AsyncDatabaseService._engine = connector.engine
+
+            # Run the async connection test
+            if not await connector.test_connection():
+                raise Exception("Database connection test failed")
+
+    @staticmethod
+    def engine():
+        return AsyncDatabaseService._engine
+
+
+class AsyncDatabaseConnector(metaclass=SingletonMeta):
+    def __init__(self, db_config: DatabaseConfig):
+        # Extract the connection info from the DatabaseConfig
+        self.db_user = db_config.username
+        self.db_password = db_config.password
+
+        # Extract the host, port and database name from the URL
+        url_parts = db_config.url.split('//')[1].split('/')
+        host_port = url_parts[0].split(':')
+        self.db_host = host_port[0]
+        self.db_port = host_port[1]
+        self.db_name = url_parts[1].split('?')[0]
+
+        # Extract the URL query parameters
+        params_str = url_parts[1].split('?')[1] if len(
+            url_parts[1].split('?')) > 1 else ''
+        params = {}
+        for param in params_str.split('&'):
+            if param:
+                key, value = param.split('=')
+                params[key] = value
+
+        # Remove the listed parameters from params
+        for key in ['useUnicode', 'characterEncoding', 'serverTimezone', 'zeroDateTimeBehavior']:
+            if key in params:
+                del params[key]
+
+        # Build the database connection URL
+        # Note: mysqlconnector is replaced with aiomysql here to support async
+        self.db_url = f'mysql+aiomysql://{self.db_user}:{self.db_password}@{self.db_host}:{self.db_port}/{self.db_name}'
+
+        SYLogger.info(f"Database URL: {self.db_url}")
+
+        # Tuned connection pool configuration
+        # Uses create_async_engine instead of create_engine
+        self.engine = create_async_engine(
+            self.db_url,
+            connect_args=params,
+            pool_size=10,  # connection pool size
+            max_overflow=20,  # maximum overflow connections
+            pool_timeout=30,  # connection timeout (seconds)
+            pool_recycle=3600,  # connection recycle interval (seconds)
+            pool_pre_ping=True,  # validate each connection before checkout
+            echo=False,  # print SQL statements
+        )
+
+        # Register the SQL logging hooks (note: SQLTraceLogger must support an async engine, or the logging logic may need adjusting)
+        # Assumes AsyncSQLTraceLogger.setup_sql_logging can handle an AsyncEngine
+        AsyncSQLTraceLogger.setup_sql_logging(self.engine)
+
+    async def test_connection(self):
+        try:
+            # Async context manager
+            async with self.engine.connect() as connection:
+                # Run a trivial query
+                await connection.execute(text("SELECT 1"))
+                return True
+        except Exception as e:
+            SYLogger.error(f"Database connection test failed: {e}")
+            return False
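For reference, the URL handling above can be traced with a standalone sketch (the datasource URL and credentials are made up; the dropped keys are the JDBC-style options the async driver would not accept as connect_args):

# Standalone sketch of AsyncDatabaseConnector's URL rewrite (sample URL).
url = "jdbc:mysql://10.0.0.5:3306/orders?useUnicode=true&characterEncoding=utf8&connect_timeout=10"
url_parts = url.split('//')[1].split('/')
host, port = url_parts[0].split(':')
name = url_parts[1].split('?')[0]
params = dict(p.split('=') for p in url_parts[1].split('?')[1].split('&'))
for key in ['useUnicode', 'characterEncoding', 'serverTimezone', 'zeroDateTimeBehavior']:
    params.pop(key, None)
print(f"mysql+aiomysql://app:secret@{host}:{port}/{name}", params)
# mysql+aiomysql://app:secret@10.0.0.5:3306/orders {'connect_timeout': '10'}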
sycommon/logging/async_sql_logger.py
ADDED
@@ -0,0 +1,65 @@
+from sqlalchemy import event
+from sqlalchemy.ext.asyncio import AsyncEngine
+from sycommon.logging.kafka_log import SYLogger
+import time
+from datetime import datetime
+from decimal import Decimal
+
+
+class AsyncSQLTraceLogger:
+    @staticmethod
+    def setup_sql_logging(engine):
+        """
+        Register event listeners for a SQLAlchemy async engine.
+        Note: listeners must target engine.sync_engine, never the AsyncEngine directly.
+        """
+        def serialize_params(params):
+            """Serialize parameters of special types"""
+            if isinstance(params, (list, tuple)):
+                return [serialize_params(p) for p in params]
+            elif isinstance(params, dict):
+                return {k: serialize_params(v) for k, v in params.items()}
+            elif isinstance(params, datetime):
+                return params.isoformat()
+            elif isinstance(params, Decimal):
+                return float(params)
+            else:
+                return params
+
+        # ========== Core change ==========
+        # Listeners must be attached to the underlying sync engine via engine.sync_engine
+        target = engine.sync_engine
+
+        @event.listens_for(target, "after_cursor_execute")
+        def after_cursor_execute(
+            conn, cursor, statement, parameters, context, executemany
+        ):
+            try:
+                # Read the start time from the connection options
+                # conn here is the synchronous connection object
+                start_time = conn.info.get('_start_time') or \
+                    conn._execution_options.get("_start_time", time.time())
+
+                execution_time = (time.time() - start_time) * 1000
+
+                sql_log = {
+                    "type": "SQL",
+                    "statement": statement,
+                    "parameters": serialize_params(parameters),
+                    "execution_time_ms": round(execution_time, 2),
+                }
+
+                # Note: SYLogger.info must be thread-safe or non-blocking, otherwise it may hurt async performance
+                SYLogger.info(f"SQL executed: {sql_log}")
+            except Exception as e:
+                SYLogger.error(f"SQL log handling failed: {str(e)}")
+
+        @event.listens_for(target, "before_cursor_execute")
+        def before_cursor_execute(
+            conn, cursor, statement, parameters, context, executemany
+        ):
+            try:
+                # Record the start time in execution_options
+                conn = conn.execution_options(_start_time=time.time())
+            except Exception as e:
+                SYLogger.error(f"SQL start-time recording failed: {str(e)}")
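The sync_engine indirection is the key detail here: SQLAlchemy emits cursor events only from the synchronous engine wrapped inside an AsyncEngine, so listeners registered on the AsyncEngine itself would never fire. A minimal standalone sketch (assumes the aiosqlite driver, which is not a dependency of this package):

import asyncio
from sqlalchemy import event, text
from sqlalchemy.ext.asyncio import create_async_engine

engine = create_async_engine("sqlite+aiosqlite:///:memory:")

@event.listens_for(engine.sync_engine, "before_cursor_execute")  # not engine itself
def log_statement(conn, cursor, statement, parameters, context, executemany):
    print("SQL:", statement)

async def main():
    async with engine.connect() as conn:
        await conn.execute(text("SELECT 1"))

asyncio.run(main())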
sycommon/logging/kafka_log.py
CHANGED
@@ -114,7 +114,7 @@ class KafkaLogger(metaclass=SingletonMeta):
             trace_id = None
 
         if not trace_id:
-            trace_id = SYLogger.get_trace_id() or Snowflake.
+            trace_id = SYLogger.get_trace_id() or Snowflake.id
 
         # Get the thread/coroutine info
         thread_info = SYLogger._get_execution_context()
@@ -173,7 +173,7 @@ class KafkaLogger(metaclass=SingletonMeta):
             "className": "",
             "sqlCost": 0,
             "size": len(str(message)),
-            "uid": int(Snowflake.
+            "uid": int(Snowflake.id)  # a separate, fresh id
         }
 
         # Smart queue management
@@ -212,7 +212,7 @@ class KafkaLogger(metaclass=SingletonMeta):
             return
 
         # Get the current trace_id
-        trace_id = SYLogger.get_trace_id() or Snowflake.
+        trace_id = SYLogger.get_trace_id() or Snowflake.id
 
         # Build the error log
        error_log = {
@@ -459,7 +459,7 @@ class SYLogger:
 
     @staticmethod
     def _log(msg: any, level: str = "INFO"):
-        trace_id = SYLogger.get_trace_id() or Snowflake.
+        trace_id = SYLogger.get_trace_id() or Snowflake.id
 
         if isinstance(msg, dict) or isinstance(msg, list):
             msg_str = json.dumps(msg, ensure_ascii=False)
@@ -473,7 +473,7 @@ class SYLogger:
         request_log = {}
         if level == "ERROR":
             request_log = {
-                "trace_id": str(trace_id) if trace_id else Snowflake.
+                "trace_id": str(trace_id) if trace_id else Snowflake.id,
                 "message": msg_str,
                 "traceback": traceback.format_exc(),
                 "level": level,
@@ -481,7 +481,7 @@ class SYLogger:
             }
         else:
             request_log = {
-                "trace_id": str(trace_id) if trace_id else Snowflake.
+                "trace_id": str(trace_id) if trace_id else Snowflake.id,
                 "message": msg_str,
                 "level": level,
                 "threadName": thread_info
@@ -521,7 +521,7 @@ class SYLogger:
     @staticmethod
     def exception(msg: any, *args, **kwargs):
         """Log exception info, including the full stack"""
-        trace_id = SYLogger.get_trace_id() or Snowflake.
+        trace_id = SYLogger.get_trace_id() or Snowflake.id
 
         if isinstance(msg, dict) or isinstance(msg, list):
             msg_str = json.dumps(msg, ensure_ascii=False)
@@ -533,7 +533,7 @@ class SYLogger:
 
         # Build the log including the exception stack
         request_log = {
-            "trace_id": str(trace_id) if trace_id else Snowflake.
+            "trace_id": str(trace_id) if trace_id else Snowflake.id,
             "message": msg_str,
             "level": "ERROR",
             "threadName": thread_info
sycommon/middleware/traceid.py
CHANGED
@@ -12,7 +12,7 @@ def setup_trace_id_handler(app):
         # Generate or fetch the traceId
         trace_id = request.headers.get("x-traceId-header")
         if not trace_id:
-            trace_id = Snowflake.
+            trace_id = Snowflake.id
 
         # Set the trace_id context
         token = SYLogger.set_trace_id(trace_id)
@@ -101,9 +101,11 @@ def setup_trace_id_handler(app):
         if "application/json" in content_type and not content_disposition.startswith("attachment"):
             try:
                 data = json.loads(response_body)
-
-
-                data
+                new_body = response_body
+                if data:
+                    data["traceId"] = trace_id
+                    new_body = json.dumps(
+                        data, ensure_ascii=False).encode()
 
                 # Create a new response, making sure Content-Length is correct
                 response = Response(
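The rewrite only re-serializes the body when json.loads yields a truthy value, which sidesteps empty or null JSON payloads. A standalone sketch of the new branch (the payload is made up):

import json

response_body = b'{"code": 0, "msg": "ok"}'
trace_id = "7314986200584892416"  # sample snowflake id

data = json.loads(response_body)
new_body = response_body
if data:
    data["traceId"] = trace_id
    new_body = json.dumps(data, ensure_ascii=False).encode()
print(new_body)  # b'{"code": 0, "msg": "ok", "traceId": "7314986200584892416"}'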
sycommon/services.py
CHANGED
@@ -23,6 +23,9 @@ class Services(metaclass=SingletonMeta):
     _user_lifespan: Optional[Callable] = None
     _shutdown_lock: asyncio.Lock = asyncio.Lock()
 
+    # Pending async database initialization tasks
+    _pending_async_db_setup: List[Tuple[Callable, str]] = []
+
     def __init__(self, config: dict, app: FastAPI):
         if not Services._config:
             Services._config = config
@@ -48,25 +51,24 @@
         nacos_service: Optional[Callable[[dict], None]] = None,
         logging_service: Optional[Callable[[dict], None]] = None,
         database_service: Optional[Union[
-            Tuple[Callable
-            List[Tuple[Callable
+            Tuple[Callable, str],
+            List[Tuple[Callable, str]]
         ]] = None,
         rabbitmq_listeners: Optional[List[RabbitMQListenerConfig]] = None,
         rabbitmq_senders: Optional[List[RabbitMQSendConfig]] = None
     ) -> FastAPI:
         load_dotenv()
-        # Save the app instance and config
         cls._app = app
         cls._config = config
         cls._user_lifespan = app.router.lifespan_context
-
+
         applications.get_swagger_ui_html = custom_swagger_ui_html
         applications.get_redoc_html = custom_redoc_html
-
+
         if not cls._config:
             config = yaml.safe_load(open('app.yaml', 'r', encoding='utf-8'))
             cls._config = config
-
+
         app.state.config = {
             "host": cls._config.get('Host', '0.0.0.0'),
             "port": cls._config.get('Port', 8080),
@@ -74,7 +76,6 @@
             "h11_max_incomplete_event_size": cls._config.get('H11MaxIncompleteEventSize', 1024 * 1024 * 10)
         }
 
-        # Configure the non-async services right away (before app startup)
         if middleware:
             middleware(app, config)
 
@@ -84,8 +85,29 @@
         if logging_service:
             logging_service(config)
 
+        # ========== Handle the database services ==========
+        # Clear the previous pending list (avoids duplication on hot reload)
+        cls._pending_async_db_setup = []
+
         if database_service:
-
+            # Parse the config and split sync from async setup functions
+            items = [database_service] if isinstance(
+                database_service, tuple) else database_service
+            for item in items:
+                db_setup_func, db_name = item
+                if asyncio.iscoroutinefunction(db_setup_func):
+                    # Async functions are queued for execution at app startup
+                    logging.info(f"Detected async database service: {db_name}; it will be initialized at app startup")
+                    cls._pending_async_db_setup.append(item)
+                else:
+                    # Sync functions are executed immediately
+                    logging.info(f"Running sync database service: {db_name}")
+                    try:
+                        db_setup_func(config, db_name)
+                    except Exception as e:
+                        logging.error(
+                            f"Sync database service {db_name} failed to initialize: {e}", exc_info=True)
+                        raise
 
         # Create the combined lifespan manager
         @asynccontextmanager
@@ -93,14 +115,25 @@
             # 1. Run Services' own initialization
             instance = cls(config, app)
 
-            #
+            # ========== Run the pending async database initialization ==========
+            if cls._pending_async_db_setup:
+                logging.info("Starting async database initialization...")
+                for db_setup_func, db_name in cls._pending_async_db_setup:
+                    try:
+                        await db_setup_func(config, db_name)
+                        logging.info(f"Async database service {db_name} initialized successfully")
+                    except Exception as e:
+                        logging.error(
+                            f"Async database service {db_name} failed to initialize: {e}", exc_info=True)
+                        raise
+
+            # ========== Initialize MQ ==========
             has_valid_listeners = bool(
                 rabbitmq_listeners and len(rabbitmq_listeners) > 0)
             has_valid_senders = bool(
                 rabbitmq_senders and len(rabbitmq_senders) > 0)
 
             try:
-                # Only initialize RabbitMQService when listeners or senders exist
                 if has_valid_listeners or has_valid_senders:
                     await instance._setup_mq_async(
                         rabbitmq_listeners=rabbitmq_listeners if has_valid_listeners else None,
@@ -119,28 +152,18 @@
             # 2. Run the user-defined lifespan
             if cls._user_lifespan:
                 async with cls._user_lifespan(app):
-                    yield
+                    yield
             else:
-                yield
+                yield
 
             # 3. Run Services' shutdown logic
             await cls.shutdown()
             logging.info("Services shut down")
 
-        # Set the combined lifespan
         app.router.lifespan_context = combined_lifespan
-
         return app
 
-
-    def _setup_database_static(database_service, config):
-        """Static method: set up the database services"""
-        if isinstance(database_service, tuple):
-            db_setup, db_name = database_service
-            db_setup(config, db_name)
-        elif isinstance(database_service, list):
-            for db_setup, db_name in database_service:
-                db_setup(config, db_name)
+    # _setup_database_static has been removed; its logic is now inlined in plugins
 
     async def _setup_mq_async(
         self,
@@ -149,16 +172,13 @@
         has_listeners: bool = False,
         has_senders: bool = False,
     ):
-        """Set up MQ asynchronously
-        # ========== Only initialize when MQ is actually used ==========
+        """Set up the MQ-related services asynchronously"""
         if not (has_listeners or has_senders):
             logging.info("No RabbitMQ listener/sender configuration; skipping RabbitMQService initialization")
             return
 
-        # Run RabbitMQService initialization only when listeners or senders exist
         RabbitMQService.init(self._config, has_listeners, has_senders)
 
-        # Optimization: wait until the connection pool exists and has finished initializing (avoids running later logic too early)
         start_time = asyncio.get_event_loop().time()
         while not (RabbitMQService._connection_pool and RabbitMQService._connection_pool._initialized) and not RabbitMQService._is_shutdown:
             if asyncio.get_event_loop().time() - start_time > 30:
@@ -166,10 +186,7 @@
             logging.info("Waiting for the RabbitMQ connection pool to initialize...")
             await asyncio.sleep(0.5)
 
-        # ========== Keep the original strict sender/listener initialization checks ==========
-        # Run sender initialization only when senders are configured
         if has_senders and rabbitmq_senders:
-            # If listeners exist, walk the listener list and, for matching queue names, copy prefetch_count onto the sender object
             if has_listeners and rabbitmq_listeners:
                 for sender in rabbitmq_senders:
                     for listener in rabbitmq_listeners:
@@ -177,31 +194,25 @@
                         sender.prefetch_count = listener.prefetch_count
             await self._setup_senders_async(rabbitmq_senders, has_listeners)
 
-        # Run listener initialization only when listeners are configured
         if has_listeners and rabbitmq_listeners:
             await self._setup_listeners_async(rabbitmq_listeners, has_senders)
 
-        # Verify the initialization result
         if has_listeners:
-            # Fetch the client count asynchronously (adapted to the new RabbitMQService)
             listener_count = len(RabbitMQService._consumer_tasks)
             logging.info(f"Listener initialization complete; started {listener_count} consumers")
             if listener_count == 0:
                 logging.warning("No listeners were initialized; check the configuration or the MQ service status")
 
     async def _setup_senders_async(self, rabbitmq_senders, has_listeners: bool):
-        """
+        """Set up the senders"""
         Services._registered_senders = [
             sender.queue_name for sender in rabbitmq_senders]
-
-        # Pass whether listeners exist through to RabbitMQService (async call)
         await RabbitMQService.setup_senders(rabbitmq_senders, has_listeners)
-        # Update the registered senders (fetch the actually registered names from RabbitMQService)
         Services._registered_senders = RabbitMQService._sender_client_names
         logging.info(f"Registered RabbitMQ senders: {Services._registered_senders}")
 
     async def _setup_listeners_async(self, rabbitmq_listeners, has_senders: bool):
-        """
+        """Set up the listeners"""
         await RabbitMQService.setup_listeners(rabbitmq_listeners, has_senders)
 
     @classmethod
@@ -212,7 +223,7 @@
         max_retries: int = 3,
         retry_delay: float = 1.0, **kwargs
     ) -> None:
-        """
+        """Send a message"""
         if not cls._initialized or not cls._loop:
             logging.error("Services not properly initialized!")
             raise ValueError("Service not properly initialized")
@@ -223,18 +234,15 @@
 
         for attempt in range(max_retries):
             try:
-                # Verify the sender is registered
                 if queue_name not in cls._registered_senders:
                     cls._registered_senders = RabbitMQService._sender_client_names
                     if queue_name not in cls._registered_senders:
                         raise ValueError(f"Sender {queue_name} is not registered")
 
-                # Get the sender (adapted to the new async get_sender method)
                 sender = await RabbitMQService.get_sender(queue_name)
                 if not sender:
                     raise ValueError(f"Sender '{queue_name}' does not exist or its connection is invalid")
 
-                # Send the message (calls RabbitMQService's async send_message)
                 await RabbitMQService.send_message(data, queue_name, **kwargs)
                 logging.info(f"Message sent successfully (attempt {attempt+1}/{max_retries})")
                 return
@@ -244,25 +252,18 @@
                     logging.error(
                         f"Message send failed (after {max_retries} attempts): {str(e)}", exc_info=True)
                     raise
-
                 logging.warning(
-                    f"Message send failed (attempt {attempt+1}/{max_retries}): {str(e)},"
-                    f"retrying in {retry_delay}s..."
-                )
+                    f"Message send failed (attempt {attempt+1}/{max_retries}): {str(e)}; retrying in {retry_delay}s...")
                 await asyncio.sleep(retry_delay)
 
     @classmethod
     async def shutdown(cls):
-        """
+        """Shut down all services"""
         async with cls._shutdown_lock:
             if RabbitMQService._is_shutdown:
                 logging.info("RabbitMQService is already shut down; no need to repeat")
                 return
-
-            # Shut down the RabbitMQ service (async call; internally closes all clients and consumer tasks)
             await RabbitMQService.shutdown()
-
-            # Clean up the global state
             cls._initialized = False
             cls._registered_senders.clear()
             logging.info("All services shut down")
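The sync/async split above keys off asyncio.iscoroutinefunction; a quick illustration with stand-in setup functions (both names are made up):

import asyncio

def setup_mysql(config, name):
    pass  # would run immediately, before app startup

async def setup_mysql_async(config, name):
    pass  # would be queued and awaited inside the combined lifespan

for func in (setup_mysql, setup_mysql_async):
    kind = "async (deferred)" if asyncio.iscoroutinefunction(func) else "sync (immediate)"
    print(func.__name__, "->", kind)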
sycommon/synacos/feign.py
CHANGED
@@ -25,7 +25,7 @@ async def feign(service_name, api_path, method='GET', params=None, headers=None,
     # Initialize headers, making sure it is a mutable dict
     headers = headers.copy() if headers else {}
     if "x-traceId-header" not in headers:
-        headers["x-traceId-header"] = SYLogger.get_trace_id() or Snowflake.
+        headers["x-traceId-header"] = SYLogger.get_trace_id() or Snowflake.id
 
     # Handle the Content-Type for JSON requests
     is_json_request = method.upper() in ["POST", "PUT", "PATCH"] and not (
sycommon/synacos/feign_client.py
CHANGED
@@ -28,7 +28,7 @@ def feign_client(
     default_headers: Optional[Dict[str, str]] = None
 ):
     default_headers = default_headers or {}
-    default_headers["x-traceId-header"] = SYLogger.get_trace_id() or Snowflake.
+    default_headers["x-traceId-header"] = SYLogger.get_trace_id() or Snowflake.id
 
     def decorator(cls):
         class FeignClient:
@@ -155,7 +155,7 @@ def feign_client(
         def _build_headers(self, param_meta: Dict[str, Param], bound_args: Dict[str, Any], method_headers: Dict[str, str]) -> Dict[str, str]:
             headers = self.default_headers.copy()
             headers.update(method_headers)
-            headers["x-traceId-header"] = SYLogger.get_trace_id() or Snowflake.
+            headers["x-traceId-header"] = SYLogger.get_trace_id() or Snowflake.id
             for name, meta in param_meta.items():
                 if isinstance(meta, Header) and name in bound_args:
                     value = bound_args[name]
sycommon/tools/snowflake.py
CHANGED
@@ -3,13 +3,29 @@ import threading
 import socket
 import hashlib
 import random
-
+import os
+from typing import Optional, Type, Any
 from os import environ
-import
+import psutil
+
+
+class ClassProperty:
+    """
+    Custom class-property descriptor, replacing the deprecated @classmethod + @property combination.
+    Allows access via Class.attribute without instantiation.
+    """
+
+    def __init__(self, func):
+        self.func = func
+
+    def __get__(self, instance: Any, cls: Type) -> str:
+        # Call the wrapped function, passing the class itself as the first argument
+        return self.func(cls)
 
 
 class Snowflake:
-    """
+    """Snowflake ID generator (production-hardened: no public-network dependency, suited to intranet/K8s environments)"""
+    # Base configuration (adjust to the business as needed)
     START_TIMESTAMP = 1388534400000  # 2014-01-01 00:00:00
     SEQUENCE_BITS = 12
     MACHINE_ID_BITS = 10
@@ -17,8 +33,10 @@ class Snowflake:
     MAX_SEQUENCE = (1 << SEQUENCE_BITS) - 1
     MACHINE_ID_SHIFT = SEQUENCE_BITS
     TIMESTAMP_SHIFT = SEQUENCE_BITS + MACHINE_ID_BITS
+    CLOCK_BACKWARD_THRESHOLD = 5  # tolerated clock-rollback threshold (ms)
+    _MAX_JAVA_LONG = 9223372036854775807  # Java Long max value
 
-    #
+    # Class-level singleton instance (thread-safe)
     _instance = None
     _instance_lock = threading.Lock()
 
@@ -27,22 +45,38 @@
         Initialization: prefer the passed-in machine_id, otherwise derive it from the K8s environment automatically
         :param machine_id: manually specified machine ID (None means auto-computed)
         """
+        # Pre-check: make sure snowflake IDs cannot exceed the Java Long max value
+        self._validate_timestamp_range()
+
         # Auto-compute the machine_id in a K8s environment
         if machine_id is None:
             machine_id = self._get_k8s_machine_id()
 
+        # Validate the machine_id
         if not (0 <= machine_id <= self.MAX_MACHINE_ID):
             raise ValueError(f"The machine ID must be between 0 and {self.MAX_MACHINE_ID}")
 
+        # Initialize the core state
         self.machine_id = machine_id
         self.last_timestamp = -1
         self.sequence = 0
         self.lock = threading.Lock()
 
+    def _validate_timestamp_range(self):
+        """Check that the current timestamp is within the supported range, so IDs never exceed the Java Long max value"""
+        max_support_timestamp = self.START_TIMESTAMP + \
+            (1 << (64 - self.TIMESTAMP_SHIFT)) - 1
+        current_timestamp = self._get_current_timestamp()
+        if current_timestamp > max_support_timestamp:
+            raise RuntimeError(
+                f"The current timestamp ({current_timestamp}) exceeds the maximum supported timestamp ({max_support_timestamp}); "
+                f"adjust START_TIMESTAMP or reduce the TIMESTAMP_SHIFT bit count"
+            )
+
     def _get_k8s_machine_id(self) -> int:
         """
-        Auto-compute a unique machine_id from the K8s environment
-        Priority: POD_NAME > POD_IP > container intranet IP
+        Auto-compute a unique machine_id from the K8s environment (no public-network dependency, multiple fallbacks, lower collision risk):
+        Priority: POD_NAME > POD_IP > container intranet IP (read via psutil) > container hostname > process+time+random (final fallback)
         """
         # 1. Prefer the K8s built-in POD_NAME (injected by default, highest priority)
         pod_name = environ.get("POD_NAME")
@@ -54,72 +88,73 @@
         if pod_ip:
             return self._hash_to_machine_id(pod_ip)
 
-        # 3. Fallback 1: read the local NICs to get an intranet IP
+        # 3. Fallback 1: read the local NICs to get an intranet IP (netifaces replaced with psutil)
         try:
             local_ip = self._get_local_internal_ip()
             if local_ip:
                 return self._hash_to_machine_id(local_ip)
-
-            # logger.warning("NIC info was read successfully, but no non-loopback intranet IP was found")
-            pass
-        except Exception as e:
-            # logger.warning(f"Failed to read the local NIC IP: {e}; trying the hostname")
+        except Exception:
             pass
 
         # 4. Fallback 2: use the container hostname (equals the Pod name by default in K8s, guaranteeing uniqueness)
         hostname = socket.gethostname()
         if hostname:
-            # logger.info(
-            #     f"POD_NAME/POD_IP/intranet IP unavailable; deriving machine_id from hostname {hostname}")
             return self._hash_to_machine_id(hostname)
 
-        # 5.
-
-
-        return random_id
+        # 5. Final fallback: add entropy (process ID + millisecond timestamp + random number) to sharply reduce the collision probability
+        fallback_text = f"{os.getpid()}_{int(time.time()*1000)}_{random.randint(0, 100000)}"
+        return self._hash_to_machine_id(fallback_text)
 
     def _get_local_internal_ip(self) -> Optional[str]:
         """
-        Read the local NIC info and get a non-loopback intranet IP
+        Read the local NIC info via psutil and get a non-loopback intranet IP (cross-platform, filtering lo/lo0 and other loopback NICs)
         :return: the intranet IP string, or None on failure
         """
         try:
-            #
-
-
-
-
-
-
-
+            # Walk all NIC interfaces
+            net_if_addrs = psutil.net_if_addrs()
+            for interface_name, addrs in net_if_addrs.items():
+                # Filter loopback/virtual NICs (covers lo, lo0, lo1, Loopback, virtual, ...)
+                if (interface_name.lower().startswith("lo")
+                        or interface_name.lower() in ["loopback", "virtual"]):
+                    continue
+                # Walk this NIC's addresses, returning the first non-loopback IPv4
+                for addr in addrs:
+                    if addr.family == psutil.AF_INET:
+                        ip = addr.address
                         if ip and not ip.startswith('127.'):
-                            # Optional: filter docker0's default address range (adjust to the actual intranet range)
-                            # if not ip.startswith('172.17.'):
                             return ip
             return None
-        except
-            #
-            # logger.warning("netifaces is not installed; trying a degraded way to get the IP")
+        except Exception:
+            # The psutil call failed; degrade to pure built-in methods
            return self._get_local_ip_fallback()
 
     def _get_local_ip_fallback(self) -> Optional[str]:
         """
-
+        Enhanced fallback: pure built-in Python, fetching an intranet IP in several ways (no third-party dependencies)
         """
+        # Option 1: a socket "connected" to an intranet address (avoids touching the public network)
         try:
-            # Create a socket without connecting anywhere; just bind locally
             s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-
-            s.bind(('', 0))
+            s.connect(("192.168.0.1", 80))
             local_ip = s.getsockname()[0]
             s.close()
-            # Filter loopback addresses
             if not local_ip.startswith('127.'):
                 return local_ip
-            return None
         except Exception:
-
+            pass
+
+        # Option 2: enumerate all local IPs (via hostname resolution)
+        try:
+            hostname = socket.gethostname()
+            ip_list = socket.gethostbyname_ex(hostname)[2]
+            for ip in ip_list:
+                if not ip.startswith('127.'):
+                    return ip
+        except Exception:
+            pass
+
+        return None
 
     def _hash_to_machine_id(self, text: str) -> int:
         """Hash the string and take the modulus to get a machine_id in 0~1023 (keeps the distribution even)"""
@@ -128,42 +163,60 @@
         return hash_int % self.MAX_MACHINE_ID
 
     def _get_current_timestamp(self) -> int:
+        """Get the current millisecond timestamp"""
         return int(time.time() * 1000)
 
     def _wait_next_millisecond(self, current_timestamp: int) -> int:
+        """Wait until the next millisecond, to avoid exhausting the sequence"""
         while current_timestamp <= self.last_timestamp:
             current_timestamp = self._get_current_timestamp()
         return current_timestamp
 
     def generate_id(self) -> int:
-
-
+        """Generate a snowflake ID (production tuning: finer lock granularity, tolerates slight clock rollback)"""
+        current_timestamp = self._get_current_timestamp()
 
-
+        # 1. Handle clock rollback: tolerate drift within CLOCK_BACKWARD_THRESHOLD, raise beyond it
+        time_diff = self.last_timestamp - current_timestamp
+        if time_diff > 0:
+            if time_diff > self.CLOCK_BACKWARD_THRESHOLD:
                 raise RuntimeError(
-                    f"Clock rollback detected: the current timestamp ({current_timestamp}) < the last timestamp ({self.last_timestamp})"
+                    f"Clock rollback detected: the current timestamp ({current_timestamp}) < the last timestamp ({self.last_timestamp}); "
+                    f"drift {time_diff}ms (threshold {self.CLOCK_BACKWARD_THRESHOLD}ms)"
                 )
+            # Slight rollback: wait for the clock to catch up
+            current_timestamp = self._wait_next_millisecond(current_timestamp)
 
-
+        # 2. Finer lock granularity: lock only while bumping the sequence within the same millisecond
+        if current_timestamp != self.last_timestamp:
+            with self.lock:
+                self.last_timestamp = current_timestamp
+                self.sequence = 0
+        else:
+            with self.lock:
                 self.sequence = (self.sequence + 1) & self.MAX_SEQUENCE
                 if self.sequence == 0:
                     current_timestamp = self._wait_next_millisecond(
                         current_timestamp)
-
-                    self.sequence = 0
+                    self.last_timestamp = current_timestamp
 
-
+        # 3. Compute the final snowflake ID
+        snowflake_id = (
+            ((current_timestamp - self.START_TIMESTAMP) << self.TIMESTAMP_SHIFT)
+            | (self.machine_id << self.MACHINE_ID_SHIFT)
+            | self.sequence
+        )
 
-
-
-
-
-        )
+        # Final check: make sure the ID does not exceed the Java Long max value
+        if snowflake_id > self._MAX_JAVA_LONG:
+            raise RuntimeError(
+                f"The generated snowflake ID ({snowflake_id}) exceeds the Java Long max value ({self._MAX_JAVA_LONG})")
 
-
+        return snowflake_id
 
     @staticmethod
     def parse_id(snowflake_id: int) -> dict:
+        """Parse a snowflake ID, returning the generation time, machine ID, sequence and more"""
         from datetime import datetime
         sequence = snowflake_id & Snowflake.MAX_SEQUENCE
         machine_id = (snowflake_id >>
@@ -177,45 +230,71 @@
             "snowflake_id": snowflake_id,
             "generate_time": generate_time,
             "machine_id": machine_id,
-            "sequence": sequence
+            "sequence": sequence,
+            "is_java_long_safe": snowflake_id <= Snowflake._MAX_JAVA_LONG
         }
 
     @classmethod
     def next_id(cls) -> str:
         """
-        Generate a snowflake ID
+        Generate a snowflake ID (thread-safe singleton; avoids creating duplicate instances, all initialization finishes inside the lock)
         :return: the snowflake ID as a string
         """
-        # Create the instance via the singleton pattern
         if cls._instance is None:
             with cls._instance_lock:
                 if cls._instance is None:
+                    # Initialize inside the lock, so multiple threads never recompute the machine_id
                    cls._instance = cls()
-        # Generate the ID and return it as a string
         return str(cls._instance.generate_id())
 
+    @ClassProperty
+    def id(cls) -> str:
+        """
+        Generate a snowflake ID directly through the `Snowflake.id` property (compatible with Python 3.11+)
+        :return: the snowflake ID as a string
+        """
+        return cls.next_id()
 
-if __name__ == "__main__":
-    # Generate 1000 IDs and verify them
-    id_set = set()  # used to detect duplicate IDs
-    _MAX_JAVA_LONG = 9223372036854775807
-
-    for i in range(1000):
-        id_str = Snowflake.next_id()
-        id_num = int(id_str)
-
-        # Verify the ID does not exceed the Java long max value
-        assert id_num <= _MAX_JAVA_LONG, f"ID exceeds the Java long max value: {id_num}"
-
-        # Verify IDs never repeat
-        assert id_str not in id_set, f"Duplicate ID generated: {id_str}"
-        id_set.add(id_str)
-
-        # Print a parse result every 100 IDs
-        if i % 100 == 0:
-            parse_result = Snowflake.parse_id(id_num)
-            print(f"Generated ID: {id_str}")
-            print(f"Parse result: {parse_result}")
-            print("-" * 50)
 
+if __name__ == "__main__":
+    print("=== Production snowflake ID generation test ===")
+    # 1. Basic generation test
+    id1 = Snowflake.id
+    id2 = Snowflake.id
+    id3 = Snowflake.id
+    print(f"Generated ID1: {id1}")
+    print(f"Generated ID2: {id2}")
+    print(f"Generated ID3: {id3}")
+    print(f"IDs unique: {len({id1, id2, id3}) == 3}")
+
+    # 2. Parse the ID info
+    print("\n=== Snowflake ID parsing ===")
+    parse_info = Snowflake.parse_id(int(id3))
+    for key, value in parse_info.items():
+        print(f"{key}: {value}")
+
+    # 3. Bulk uniqueness check (10000 IDs)
+    print("\n=== Bulk uniqueness check (10000 IDs) ===")
+    id_set = set()
+    duplicate_count = 0
+    for i in range(10000):
+        snow_id = Snowflake.id
+        if snow_id in id_set:
+            duplicate_count += 1
+        id_set.add(snow_id)
+    print(f"Total generated: 10000")
+    print(f"Unique IDs: {len(id_set)}")
+    print(f"Duplicate IDs: {duplicate_count}")
+    print(f"Machine ID: {Snowflake._instance.machine_id}")
+
+    # 4. High-concurrency test
+    import concurrent.futures
+    print("\n=== High-concurrency test (100 threads) ===")
+    id_set_concurrent = set()
+    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
+        futures = [executor.submit(lambda: Snowflake.id) for _ in range(10000)]
+        for future in concurrent.futures.as_completed(futures):
+            id_set_concurrent.add(future.result())
+    print(f"Unique IDs generated under high concurrency: {len(id_set_concurrent)}")
+
+    print("\n=== Production snowflake validation passed ===")
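The layout behind generate_id follows from the shift constants: the low 12 bits hold the sequence, the next 10 bits the machine ID, and the remaining high bits the millisecond delta since START_TIMESTAMP. A minimal composition sketch with sample values:

SEQUENCE_BITS = 12
MACHINE_ID_BITS = 10
TIMESTAMP_SHIFT = SEQUENCE_BITS + MACHINE_ID_BITS  # 22
START_TIMESTAMP = 1388534400000  # 2014-01-01 00:00:00 UTC

def compose(ts_ms: int, machine_id: int, sequence: int) -> int:
    # mirrors the bitwise composition in Snowflake.generate_id
    return (((ts_ms - START_TIMESTAMP) << TIMESTAMP_SHIFT)
            | (machine_id << SEQUENCE_BITS)
            | sequence)

sid = compose(1767225600000, 512, 7)  # sample: 2026-01-01 UTC, machine 512, seq 7
print(sid, sid <= 9223372036854775807)  # the result fits in a Java Long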
{sycommon_python_lib-0.1.55a0.dist-info → sycommon_python_lib-0.1.55b1.dist-info}/METADATA
CHANGED
@@ -1,25 +1,25 @@
 Metadata-Version: 2.4
 Name: sycommon-python-lib
-Version: 0.1.55a0
+Version: 0.1.55b1
 Summary: Add your description here
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 Requires-Dist: aio-pika>=9.5.8
 Requires-Dist: aiohttp>=3.13.2
+Requires-Dist: aiomysql>=0.3.2
 Requires-Dist: decorator>=5.2.1
-Requires-Dist: fastapi>=0.
-Requires-Dist: kafka-python>=2.
+Requires-Dist: fastapi>=0.127.0
+Requires-Dist: kafka-python>=2.3.0
 Requires-Dist: loguru>=0.7.3
 Requires-Dist: mysql-connector-python>=9.5.0
 Requires-Dist: nacos-sdk-python<3.0,>=2.0.9
-Requires-Dist:
-Requires-Dist: pydantic>=2.12.
+Requires-Dist: psutil>=7.1.3
+Requires-Dist: pydantic>=2.12.5
 Requires-Dist: python-dotenv>=1.2.1
 Requires-Dist: pyyaml>=6.0.3
-Requires-Dist: sqlalchemy>=2.0.
-Requires-Dist: starlette>=0.
-Requires-Dist:
-Requires-Dist: uvicorn>=0.38.0
+Requires-Dist: sqlalchemy[asyncio]>=2.0.45
+Requires-Dist: starlette>=0.50.0
+Requires-Dist: uvicorn>=0.40.0
 
 # sycommon-python-lib
 
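The dependency bumps track the new async stack: aiomysql supplies the async MySQL driver, psutil replaces netifaces for NIC discovery in snowflake.py, and the sqlalchemy[asyncio] extra pulls in greenlet for SQLAlchemy's async engine. A quick runtime check (a sketch, not part of the package):

import importlib.metadata as md

for dist in ("aiomysql", "psutil", "sqlalchemy"):
    print(dist, md.version(dist))  # raises PackageNotFoundError if missing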
{sycommon_python_lib-0.1.55a0.dist-info → sycommon_python_lib-0.1.55b1.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
 command/cli.py,sha256=bP2LCLkRvfETIwWkVD70q5xFxMI4D3BpH09Ws1f-ENc,5849
 sycommon/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sycommon/services.py,sha256=
+sycommon/services.py,sha256=qqNmSpg8JwTcTZPtY5LyW0ttrrEbeCvh4yy09u9OMNg,11488
 sycommon/config/Config.py,sha256=9yO5b8WfvEDvkyrGrlwrLFasgh_-MjcEvGF20Gz5Xo4,3041
 sycommon/config/DatabaseConfig.py,sha256=ILiUuYT9_xJZE2W-RYuC3JCt_YLKc1sbH13-MHIOPhg,804
 sycommon/config/EmbeddingConfig.py,sha256=gPKwiDYbeu1GpdIZXMmgqM7JqBIzCXi0yYuGRLZooMI,362
@@ -8,6 +8,8 @@ sycommon/config/LLMConfig.py,sha256=yU-aIqePIeF6msfRVEtGq7SXZVDfHyTi6JduKjhMO_4,
 sycommon/config/MQConfig.py,sha256=_RDcmIdyWKjmgM5ZnriOoI-DpaxgXs7CD0awdAD6z88,252
 sycommon/config/RerankerConfig.py,sha256=dohekaY_eTynmMimIuKHBYGXXQO6rJjSkm94OPLuMik,322
 sycommon/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sycommon/database/async_base_db_service.py,sha256=w6ONUiTtF4-bXRnkBt9QpL9BAy0XUDbQG7F9Hf2rfjw,1337
+sycommon/database/async_database_service.py,sha256=4Ag5PH6DFEcJOXR8MRF9V_Jho5uCoU9Ibo3PqulDsXw,3916
 sycommon/database/base_db_service.py,sha256=J5ELHMNeGfzA6zVcASPSPZ0XNKrRY3_gdGmVkZw3Mto,946
 sycommon/database/database_service.py,sha256=mun5vgM7nkuH6_UyHLHqQ2Qk_5gRgMxJu4_obIKLT6o,3253
 sycommon/health/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -15,7 +17,8 @@ sycommon/health/health_check.py,sha256=EhfbhspRpQiKJaxdtE-PzpKQO_ucaFKtQxIm16F5M
 sycommon/health/metrics.py,sha256=fHqO73JuhoZkNPR-xIlxieXiTCvttq-kG-tvxag1s1s,268
 sycommon/health/ping.py,sha256=FTlnIKk5y1mPfS1ZGOeT5IM_2udF5aqVLubEtuBp18M,250
 sycommon/logging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sycommon/logging/
+sycommon/logging/async_sql_logger.py,sha256=_OY36XkUm__U3NhMgiecy-qd-nptZ_0gpE3J8lGAr58,2619
+sycommon/logging/kafka_log.py,sha256=viqJ2hDqnyX5eUKkhIhU__kytIwe6nLuHIAFGcaRpUI,21118
 sycommon/logging/logger_wrapper.py,sha256=TiHsrIIHiQMzXgXK12-0KIpU9GhwQJOoHslakzmq2zc,357
 sycommon/logging/sql_logger.py,sha256=aEU3OGnI_51Tjyuuf4FpUi9KPTceFRuKAOyQbPzGhzM,2021
 sycommon/middleware/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -27,7 +30,7 @@ sycommon/middleware/middleware.py,sha256=SzZ4wufSNdwC4Ppw99TE7a6AVGkrZRc55NHSrA3
 sycommon/middleware/monitor_memory.py,sha256=pYRK-wRuDd6enSg9Pf8tQxPdYQS6S0AyjyXeKFRLKEs,628
 sycommon/middleware/mq.py,sha256=4wBqiT5wJGcrfjk2GSr0_U3TStBxoNpHTzcRxVlMEHE,183
 sycommon/middleware/timeout.py,sha256=fImlAPLm4Oa8N9goXtT_0os1GZPCi9F92OgXU81DgDU,656
-sycommon/middleware/traceid.py,sha256=
+sycommon/middleware/traceid.py,sha256=FLZTxVAIboZvqK_S69eReeIzZGUDIVy6KTA1kZBoRyI,6908
 sycommon/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sycommon/models/base_http.py,sha256=EICAAibx3xhjBsLqm35Mi3DCqxp0FME4rD_3iQVjT_E,3051
 sycommon/models/log.py,sha256=rZpj6VkDRxK3B6H7XSeWdYZshU8F0Sks8bq1p6pPlDw,500
@@ -44,16 +47,16 @@ sycommon/sse/sse.py,sha256=__CfWEcYxOxQ-HpLor4LTZ5hLWqw9-2X7CngqbVHsfw,10128
 sycommon/synacos/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sycommon/synacos/example.py,sha256=61XL03tU8WTNOo3FUduf93F2fAwah1S0lbH1ufhRhRk,5739
 sycommon/synacos/example2.py,sha256=adUaru3Hy482KrOA17DfaC4nwvLj8etIDS_KrWLWmCU,4811
-sycommon/synacos/feign.py,sha256=
-sycommon/synacos/feign_client.py,sha256=
+sycommon/synacos/feign.py,sha256=xvyH_1no6gsggO3YYB0_88NWNA26odbQ_G-2MjApif0,8016
+sycommon/synacos/feign_client.py,sha256=PYjTrnqMc_Jl6Wnpiz8-PFozCjPk6VGnPWv29JefL14,15421
 sycommon/synacos/nacos_service.py,sha256=tyh_JOjjoCGiKCr1xfU7MAmu7dDQCZmTzmYsSqNjiQY,35465
 sycommon/synacos/param.py,sha256=KcfSkxnXOa0TGmCjY8hdzU9pzUsA8-4PeyBKWI2-568,1765
 sycommon/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sycommon/tools/docs.py,sha256=OPj2ETheuWjXLyaXtaZPbwmJKfJaYXV5s4XMVAUNrms,1607
-sycommon/tools/snowflake.py,sha256=
+sycommon/tools/snowflake.py,sha256=lVEe5mNCOgz5OqGQpf5_nXaGnRJlI2STX2s-ppTtanA,11947
 sycommon/tools/timing.py,sha256=OiiE7P07lRoMzX9kzb8sZU9cDb0zNnqIlY5pWqHcnkY,2064
-sycommon_python_lib-0.1.
-sycommon_python_lib-0.1.
-sycommon_python_lib-0.1.
-sycommon_python_lib-0.1.
-sycommon_python_lib-0.1.
+sycommon_python_lib-0.1.55b1.dist-info/METADATA,sha256=5TLw8YgSNN_ByzdLmVQ5RBoP3xnSAdhnVlRtY90Gp-4,7086
+sycommon_python_lib-0.1.55b1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+sycommon_python_lib-0.1.55b1.dist-info/entry_points.txt,sha256=q_h2nbvhhmdnsOUZEIwpuoDjaNfBF9XqppDEmQn9d_A,46
+sycommon_python_lib-0.1.55b1.dist-info/top_level.txt,sha256=98CJ-cyM2WIKxLz-Pf0AitWLhJyrfXvyY8slwjTXNuc,17
+sycommon_python_lib-0.1.55b1.dist-info/RECORD,,
{sycommon_python_lib-0.1.55a0.dist-info → sycommon_python_lib-0.1.55b1.dist-info}/WHEEL
RENAMED
File without changes
{sycommon_python_lib-0.1.55a0.dist-info → sycommon_python_lib-0.1.55b1.dist-info}/entry_points.txt
RENAMED
File without changes
{sycommon_python_lib-0.1.55a0.dist-info → sycommon_python_lib-0.1.55b1.dist-info}/top_level.txt
RENAMED
File without changes