sycommon-python-lib 0.1.55b0-py3-none-any.whl → 0.1.55b1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sycommon/database/async_base_db_service.py ADDED
@@ -0,0 +1,36 @@
+ from contextlib import asynccontextmanager
+ from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
+ from sycommon.config.Config import SingletonMeta
+ from sycommon.database.async_database_service import AsyncDatabaseService
+ from sycommon.logging.kafka_log import SYLogger
+
+
+ class AsyncBaseDBService(metaclass=SingletonMeta):
+     """Base database service class that wraps async session management."""
+
+     def __init__(self):
+         # Fetch the async engine (assumes AsyncDatabaseService.engine() returns an AsyncEngine)
+         self.engine = AsyncDatabaseService.engine()
+
+         # Create the async session factory.
+         # class_=AsyncSession is required so the factory produces async sessions.
+         self.Session = async_sessionmaker(
+             bind=self.engine,
+             class_=AsyncSession,
+             expire_on_commit=False
+         )
+
+     @asynccontextmanager
+     async def session(self):
+         """
+         Async database session context manager.
+         Handles session creation, commit, rollback, and cleanup automatically.
+         """
+         async with self.Session() as session:
+             try:
+                 yield session
+                 await session.commit()
+             except Exception as e:
+                 await session.rollback()
+                 SYLogger.error(f"Database operation failed: {str(e)}")
+                 raise
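For orientation, this is how the session manager above is meant to be consumed: a service class inherits the singleton and runs queries inside the managed session. A minimal sketch; the `UserService` class, table, and query are hypothetical, not part of the package.

```python
# Hypothetical consumer of AsyncBaseDBService.session(); the service name,
# table, and query below are illustrative only.
from sqlalchemy import text

from sycommon.database.async_base_db_service import AsyncBaseDBService


class UserService(AsyncBaseDBService):
    async def count_users(self) -> int:
        # session() commits on success, rolls back and re-raises on error
        async with self.session() as session:
            result = await session.execute(text("SELECT COUNT(*) FROM users"))
            return result.scalar_one()
```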
sycommon/database/async_database_service.py ADDED
@@ -0,0 +1,96 @@
+ from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
+ from sqlalchemy import text
+
+ from sycommon.config.Config import SingletonMeta
+ from sycommon.config.DatabaseConfig import DatabaseConfig, convert_dict_keys
+ from sycommon.logging.kafka_log import SYLogger
+ from sycommon.logging.async_sql_logger import AsyncSQLTraceLogger
+ from sycommon.synacos.nacos_service import NacosService
+
+
+ class AsyncDatabaseService(metaclass=SingletonMeta):
+     _engine = None
+
+     @staticmethod
+     async def setup_database(config: dict, shareConfigKey: str):
+         common = NacosService(config).share_configs.get(shareConfigKey, {})
+         if common and common.get('spring', {}).get('datasource', None):
+             databaseConfig = common.get('spring', {}).get('datasource', None)
+             converted_dict = convert_dict_keys(databaseConfig)
+             db_config = DatabaseConfig.model_validate(converted_dict)
+
+             # Initialize the DatabaseConnector with the parsed config
+             connector = AsyncDatabaseConnector(db_config)
+
+             # Store the engine
+             AsyncDatabaseService._engine = connector.engine
+
+             # Run the async connection test
+             if not await connector.test_connection():
+                 raise Exception("Database connection test failed")
+
+     @staticmethod
+     def engine():
+         return AsyncDatabaseService._engine
+
+
+ class AsyncDatabaseConnector(metaclass=SingletonMeta):
+     def __init__(self, db_config: DatabaseConfig):
+         # Pull the connection credentials out of DatabaseConfig
+         self.db_user = db_config.username
+         self.db_password = db_config.password
+
+         # Extract host, port, and database name from the URL
+         url_parts = db_config.url.split('//')[1].split('/')
+         host_port = url_parts[0].split(':')
+         self.db_host = host_port[0]
+         self.db_port = host_port[1]
+         self.db_name = url_parts[1].split('?')[0]
+
+         # Extract the query parameters from the URL
+         params_str = url_parts[1].split('?')[1] if len(
+             url_parts[1].split('?')) > 1 else ''
+         params = {}
+         for param in params_str.split('&'):
+             if param:
+                 key, value = param.split('=')
+                 params[key] = value
+
+         # Drop JDBC-specific parameters from params
+         for key in ['useUnicode', 'characterEncoding', 'serverTimezone', 'zeroDateTimeBehavior']:
+             if key in params:
+                 del params[key]
+
+         # Build the database connection URL.
+         # Note: mysqlconnector is swapped for aiomysql here to support async access.
+         self.db_url = f'mysql+aiomysql://{self.db_user}:{self.db_password}@{self.db_host}:{self.db_port}/{self.db_name}'
+
+         SYLogger.info(f"Database URL: {self.db_url}")
+
+         # Tuned connection pool configuration;
+         # create_async_engine replaces create_engine.
+         self.engine = create_async_engine(
+             self.db_url,
+             connect_args=params,
+             pool_size=10,        # connection pool size
+             max_overflow=20,     # maximum overflow connections
+             pool_timeout=30,     # pool checkout timeout (seconds)
+             pool_recycle=3600,   # connection recycle interval (seconds)
+             pool_pre_ping=True,  # validate each connection before handing it out
+             echo=False,          # do not echo SQL statements
+         )
+
+         # Register the SQL logging interceptor (note: the logger must support async engines,
+         # or the logging logic may need adjusting; setup_sql_logging is assumed to handle an AsyncEngine)
+         AsyncSQLTraceLogger.setup_sql_logging(self.engine)
+
+     async def test_connection(self):
+         try:
+             # Async context manager
+             async with self.engine.connect() as connection:
+                 # Run a trivial query
+                 await connection.execute(text("SELECT 1"))
+                 return True
+         except Exception as e:
+             SYLogger.error(f"Database connection test failed: {e}")
+             return False
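The constructor above slices a JDBC-style datasource URL by hand. A worked example of that string surgery with a made-up URL; the host, database, and parameter values are assumptions for illustration.

```python
# Walk-through of the URL parsing above, using a made-up JDBC-style URL.
url = "jdbc:mysql://10.0.0.5:3306/orders?useUnicode=true&characterEncoding=utf8&connectTimeout=3000"

url_parts = url.split('//')[1].split('/')  # ['10.0.0.5:3306', 'orders?useUnicode=...']
host, port = url_parts[0].split(':')       # '10.0.0.5', '3306'
db_name = url_parts[1].split('?')[0]       # 'orders'

params = dict(p.split('=') for p in url_parts[1].split('?')[1].split('&'))
# JDBC-only keys are dropped; whatever survives is forwarded to aiomysql via connect_args
for key in ['useUnicode', 'characterEncoding', 'serverTimezone', 'zeroDateTimeBehavior']:
    params.pop(key, None)

print(params)                                                # {'connectTimeout': '3000'}
print(f"mysql+aiomysql://user:***@{host}:{port}/{db_name}")  # final DSN shape
```

Note that any surviving parameters must be keyword arguments aiomysql actually accepts, since they are passed through unchanged as `connect_args`.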
sycommon/logging/async_sql_logger.py ADDED
@@ -0,0 +1,65 @@
+ from sqlalchemy import event
+ from sqlalchemy.ext.asyncio import AsyncEngine
+ from sycommon.logging.kafka_log import SYLogger
+ import time
+ from datetime import datetime
+ from decimal import Decimal
+
+
+ class AsyncSQLTraceLogger:
+     @staticmethod
+     def setup_sql_logging(engine):
+         """
+         Register event listeners for a SQLAlchemy async engine.
+         Note: listeners must target engine.sync_engine; they cannot be attached to the AsyncEngine directly.
+         """
+         def serialize_params(params):
+             """Serialize parameter values of special types."""
+             if isinstance(params, (list, tuple)):
+                 return [serialize_params(p) for p in params]
+             elif isinstance(params, dict):
+                 return {k: serialize_params(v) for k, v in params.items()}
+             elif isinstance(params, datetime):
+                 return params.isoformat()
+             elif isinstance(params, Decimal):
+                 return float(params)
+             else:
+                 return params
+
+         # ========== Key point ==========
+         # Listeners must be registered on the underlying sync engine.
+         target = engine.sync_engine
+
+         @event.listens_for(target, "after_cursor_execute")
+         def after_cursor_execute(
+             conn, cursor, statement, parameters, context, executemany
+         ):
+             try:
+                 # Read the start time recorded on the connection;
+                 # conn here is the synchronous connection object.
+                 start_time = conn.info.get('_start_time') or \
+                     conn._execution_options.get("_start_time", time.time())
+
+                 execution_time = (time.time() - start_time) * 1000
+
+                 sql_log = {
+                     "type": "SQL",
+                     "statement": statement,
+                     "parameters": serialize_params(parameters),
+                     "execution_time_ms": round(execution_time, 2),
+                 }
+
+                 # Note: SYLogger.info must be thread-safe or non-blocking, or async performance may suffer
+                 SYLogger.info(f"SQL executed: {sql_log}")
+             except Exception as e:
+                 SYLogger.error(f"SQL log handling failed: {str(e)}")
+
+         @event.listens_for(target, "before_cursor_execute")
+         def before_cursor_execute(
+             conn, cursor, statement, parameters, context, executemany
+         ):
+             try:
+                 # Record the start time on the connection; conn.info is what
+                 # after_cursor_execute reads first
+                 conn.info['_start_time'] = time.time()
+             except Exception as e:
+                 SYLogger.error(f"Failed to record the SQL start time: {str(e)}")
sycommon/services.py CHANGED
@@ -23,6 +23,9 @@ class Services(metaclass=SingletonMeta):
      _user_lifespan: Optional[Callable] = None
      _shutdown_lock: asyncio.Lock = asyncio.Lock()
 
+     # Stores pending async database initialization tasks
+     _pending_async_db_setup: List[Tuple[Callable, str]] = []
+
      def __init__(self, config: dict, app: FastAPI):
          if not Services._config:
              Services._config = config
@@ -48,25 +51,24 @@ class Services(metaclass=SingletonMeta):
          nacos_service: Optional[Callable[[dict], None]] = None,
          logging_service: Optional[Callable[[dict], None]] = None,
          database_service: Optional[Union[
-             Tuple[Callable[[dict, str], None], str],
-             List[Tuple[Callable[[dict, str], None], str]]
+             Tuple[Callable, str],
+             List[Tuple[Callable, str]]
          ]] = None,
          rabbitmq_listeners: Optional[List[RabbitMQListenerConfig]] = None,
          rabbitmq_senders: Optional[List[RabbitMQSendConfig]] = None
      ) -> FastAPI:
          load_dotenv()
-         # Save the app instance and configuration
          cls._app = app
          cls._config = config
          cls._user_lifespan = app.router.lifespan_context
-         # Set up the API docs
+
          applications.get_swagger_ui_html = custom_swagger_ui_html
          applications.get_redoc_html = custom_redoc_html
-         # Set host and port on app.state
+
          if not cls._config:
              config = yaml.safe_load(open('app.yaml', 'r', encoding='utf-8'))
              cls._config = config
-         # Use the config
+
          app.state.config = {
              "host": cls._config.get('Host', '0.0.0.0'),
              "port": cls._config.get('Port', 8080),
@@ -74,7 +76,6 @@ class Services(metaclass=SingletonMeta):
              "h11_max_incomplete_event_size": cls._config.get('H11MaxIncompleteEventSize', 1024 * 1024 * 10)
          }
 
-         # Configure non-async services right away (before app startup)
          if middleware:
              middleware(app, config)
 
@@ -84,8 +85,29 @@ class Services(metaclass=SingletonMeta):
          if logging_service:
              logging_service(config)
 
+         # ========== Handle database services ==========
+         # Clear the previous pending list (avoids duplicates on hot reload)
+         cls._pending_async_db_setup = []
+
          if database_service:
-             cls._setup_database_static(database_service, config)
+             # Normalize the config and split sync from async setup functions
+             items = [database_service] if isinstance(
+                 database_service, tuple) else database_service
+             for item in items:
+                 db_setup_func, db_name = item
+                 if asyncio.iscoroutinefunction(db_setup_func):
+                     # Async function: defer it to the pending list
+                     logging.info(f"Detected async database service: {db_name}; it will be initialized at app startup")
+                     cls._pending_async_db_setup.append(item)
+                 else:
+                     # Sync function: run it immediately
+                     logging.info(f"Running sync database service: {db_name}")
+                     try:
+                         db_setup_func(config, db_name)
+                     except Exception as e:
+                         logging.error(
+                             f"Sync database service {db_name} failed to initialize: {e}", exc_info=True)
+                         raise
 
          # Create the combined lifespan manager
          @asynccontextmanager
@@ -93,14 +115,25 @@ class Services(metaclass=SingletonMeta):
              # 1. Run Services' own initialization
              instance = cls(config, app)
 
-             # Explicitly check whether valid listener/sender configs exist
+             # ========== Run any pending async database initialization ==========
+             if cls._pending_async_db_setup:
+                 logging.info("Starting async database initialization...")
+                 for db_setup_func, db_name in cls._pending_async_db_setup:
+                     try:
+                         await db_setup_func(config, db_name)
+                         logging.info(f"Async database service {db_name} initialized successfully")
+                     except Exception as e:
+                         logging.error(
+                             f"Async database service {db_name} failed to initialize: {e}", exc_info=True)
+                         raise
+
+             # ========== Initialize MQ ==========
              has_valid_listeners = bool(
                  rabbitmq_listeners and len(rabbitmq_listeners) > 0)
              has_valid_senders = bool(
                  rabbitmq_senders and len(rabbitmq_senders) > 0)
 
              try:
-                 # Initialize RabbitMQService only when listeners or senders exist
                  if has_valid_listeners or has_valid_senders:
                      await instance._setup_mq_async(
                          rabbitmq_listeners=rabbitmq_listeners if has_valid_listeners else None,
@@ -119,28 +152,18 @@ class Services(metaclass=SingletonMeta):
              # 2. Run the user-defined lifespan
              if cls._user_lifespan:
                  async with cls._user_lifespan(app):
-                     yield  # app is running
+                     yield
              else:
-                 yield  # no user lifespan; yield directly
+                 yield
 
              # 3. Run Services' shutdown logic
              await cls.shutdown()
              logging.info("Services shut down")
 
-         # Set the combined lifespan
          app.router.lifespan_context = combined_lifespan
-
          return app
 
-     @staticmethod
-     def _setup_database_static(database_service, config):
-         """Static method: set up database services"""
-         if isinstance(database_service, tuple):
-             db_setup, db_name = database_service
-             db_setup(config, db_name)
-         elif isinstance(database_service, list):
-             for db_setup, db_name in database_service:
-                 db_setup(config, db_name)
+     # _setup_database_static was removed; its logic is now inlined in plugins
 
      async def _setup_mq_async(
          self,
@@ -149,16 +172,13 @@ class Services(metaclass=SingletonMeta):
          has_listeners: bool = False,
          has_senders: bool = False,
      ):
-         """Asynchronously set up MQ services (adapted to the single-channel RabbitMQService)"""
-         # ========== Initialize only when MQ is actually needed ==========
+         """Asynchronously set up MQ services"""
          if not (has_listeners or has_senders):
              logging.info("No RabbitMQ listener/sender config; skipping RabbitMQService initialization")
              return
 
-         # Run RabbitMQService initialization only when listeners or senders exist
          RabbitMQService.init(self._config, has_listeners, has_senders)
 
-         # Wait until the connection pool exists and is fully initialized (avoids running later logic too early)
          start_time = asyncio.get_event_loop().time()
          while not (RabbitMQService._connection_pool and RabbitMQService._connection_pool._initialized) and not RabbitMQService._is_shutdown:
              if asyncio.get_event_loop().time() - start_time > 30:
@@ -166,10 +186,7 @@ class Services(metaclass=SingletonMeta):
                  logging.info("Waiting for RabbitMQ connection pool initialization...")
              await asyncio.sleep(0.5)
 
-         # ========== Keep the original strict sender/listener init checks ==========
-         # Initialize senders only when senders are configured
          if has_senders and rabbitmq_senders:
-             # If listeners exist, walk the listener list and copy prefetch_count onto senders with the same queue name
              if has_listeners and rabbitmq_listeners:
                  for sender in rabbitmq_senders:
                      for listener in rabbitmq_listeners:
@@ -177,31 +194,25 @@ class Services(metaclass=SingletonMeta):
                          sender.prefetch_count = listener.prefetch_count
              await self._setup_senders_async(rabbitmq_senders, has_listeners)
 
-         # Initialize listeners only when listeners are configured
          if has_listeners and rabbitmq_listeners:
              await self._setup_listeners_async(rabbitmq_listeners, has_senders)
 
-         # Verify the initialization results
          if has_listeners:
-             # Fetch the client count asynchronously (adapted to the new RabbitMQService)
              listener_count = len(RabbitMQService._consumer_tasks)
              logging.info(f"Listener initialization complete; started {listener_count} consumers")
              if listener_count == 0:
                  logging.warning("No listeners were initialized; check the config or the MQ service status")
 
      async def _setup_senders_async(self, rabbitmq_senders, has_listeners: bool):
-         """Set up senders (adapted to the new async RabbitMQService methods)"""
+         """Set up senders"""
          Services._registered_senders = [
              sender.queue_name for sender in rabbitmq_senders]
-
-         # Pass whether listeners exist on to RabbitMQService (async call)
          await RabbitMQService.setup_senders(rabbitmq_senders, has_listeners)
-         # Refresh the registered senders (fetch the actually registered names from RabbitMQService)
          Services._registered_senders = RabbitMQService._sender_client_names
          logging.info(f"Registered RabbitMQ senders: {Services._registered_senders}")
 
      async def _setup_listeners_async(self, rabbitmq_listeners, has_senders: bool):
-         """Set up listeners (adapted to the new async RabbitMQService methods)"""
+         """Set up listeners"""
          await RabbitMQService.setup_listeners(rabbitmq_listeners, has_senders)
 
      @classmethod
@@ -212,7 +223,7 @@ class Services(metaclass=SingletonMeta):
          max_retries: int = 3,
          retry_delay: float = 1.0, **kwargs
      ) -> None:
-         """Send a message with a retry mechanism (adapted to the single-channel RabbitMQService)"""
+         """Send a message"""
          if not cls._initialized or not cls._loop:
              logging.error("Services not properly initialized!")
              raise ValueError("Services not properly initialized")
@@ -223,18 +234,15 @@ class Services(metaclass=SingletonMeta):
 
          for attempt in range(max_retries):
              try:
-                 # Verify the sender is registered
                  if queue_name not in cls._registered_senders:
                      cls._registered_senders = RabbitMQService._sender_client_names
                      if queue_name not in cls._registered_senders:
                          raise ValueError(f"Sender {queue_name} is not registered")
 
-                 # Fetch the sender (adapted to the new async get_sender method)
                  sender = await RabbitMQService.get_sender(queue_name)
                  if not sender:
                      raise ValueError(f"Sender '{queue_name}' does not exist or its connection is invalid")
 
-                 # Send the message (via RabbitMQService's async send_message)
                  await RabbitMQService.send_message(data, queue_name, **kwargs)
                  logging.info(f"Message sent successfully (attempt {attempt+1}/{max_retries})")
                  return
@@ -244,25 +252,18 @@ class Services(metaclass=SingletonMeta):
                      logging.error(
                          f"Message send failed (after {max_retries} attempts): {str(e)}", exc_info=True)
                      raise
-
                  logging.warning(
-                     f"Message send failed (attempt {attempt+1}/{max_retries}): {str(e)}; "
-                     f"retrying in {retry_delay}s..."
-                 )
+                     f"Message send failed (attempt {attempt+1}/{max_retries}): {str(e)}; retrying in {retry_delay}s...")
                  await asyncio.sleep(retry_delay)
 
      @classmethod
      async def shutdown(cls):
-         """Shut down all services (adapted to the single-channel RabbitMQService shutdown logic)"""
+         """Shut down all services"""
          async with cls._shutdown_lock:
              if RabbitMQService._is_shutdown:
                  logging.info("RabbitMQService is already shut down; nothing to do")
                  return
-
-             # Shut down the RabbitMQ service (async call; it closes all clients and consumer tasks internally)
              await RabbitMQService.shutdown()
-
-             # Clean up global state
              cls._initialized = False
              cls._registered_senders.clear()
              logging.info("All services shut down")
sycommon/tools/snowflake.py CHANGED
@@ -3,9 +3,10 @@ import threading
  import socket
  import hashlib
  import random
+ import os
  from typing import Optional, Type, Any
  from os import environ
- import netifaces
+ import psutil
 
 
  class ClassProperty:
@@ -23,7 +24,8 @@ class ClassProperty:
 
 
  class Snowflake:
-     """Snowflake ID generator (no public-network dependency; suited to intranet environments)"""
+     """Snowflake ID generator (production-hardened; no public-network dependency; suited to intranet/K8s environments)"""
+     # Base configuration (tune to the business as needed)
      START_TIMESTAMP = 1388534400000  # 2014-01-01 00:00:00
      SEQUENCE_BITS = 12
      MACHINE_ID_BITS = 10
@@ -31,8 +33,10 @@ class Snowflake:
      MAX_SEQUENCE = (1 << SEQUENCE_BITS) - 1
      MACHINE_ID_SHIFT = SEQUENCE_BITS
      TIMESTAMP_SHIFT = SEQUENCE_BITS + MACHINE_ID_BITS
+     CLOCK_BACKWARD_THRESHOLD = 5  # tolerated clock-rollback threshold (ms)
+     _MAX_JAVA_LONG = 9223372036854775807  # maximum Java long value
 
-     # Class-level singleton instance
+     # Class-level singleton instance (thread-safe)
      _instance = None
      _instance_lock = threading.Lock()
 
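For reference, the bit budget implied by these constants; `MAX_MACHINE_ID` itself is defined on an unchanged line not shown in this hunk. A quick sketch of the arithmetic:

```python
SEQUENCE_BITS = 12
MACHINE_ID_BITS = 10

MAX_MACHINE_ID = (1 << MACHINE_ID_BITS) - 1        # 1023 -> up to 1024 workers
MAX_SEQUENCE = (1 << SEQUENCE_BITS) - 1            # 4095 -> 4096 IDs per ms per worker
MACHINE_ID_SHIFT = SEQUENCE_BITS                   # 12
TIMESTAMP_SHIFT = SEQUENCE_BITS + MACHINE_ID_BITS  # 22

# 64 - 22 = 42 bits remain for the timestamp; keeping the sign bit clear for
# Java longs leaves 41 usable bits, roughly 69.7 years from START_TIMESTAMP.
print((1 << 41) / (1000 * 60 * 60 * 24 * 365.25))  # ~69.68
```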
@@ -41,22 +45,38 @@
          Initialization: prefer an explicitly passed machine_id, otherwise derive one from the K8s environment.
          :param machine_id: manually specified machine ID (None = auto-compute)
          """
+         # Pre-check: make sure generated IDs cannot exceed the Java long maximum
+         self._validate_timestamp_range()
+
          # Auto-compute machine_id for a K8s environment
          if machine_id is None:
              machine_id = self._get_k8s_machine_id()
 
+         # Validate machine_id
          if not (0 <= machine_id <= self.MAX_MACHINE_ID):
              raise ValueError(f"machine ID must be between 0 and {self.MAX_MACHINE_ID}")
 
+         # Initialize core state
          self.machine_id = machine_id
          self.last_timestamp = -1
          self.sequence = 0
          self.lock = threading.Lock()
 
+     def _validate_timestamp_range(self):
+         """Check that the current timestamp fits the snowflake layout, so IDs stay below the Java long maximum."""
+         max_support_timestamp = self.START_TIMESTAMP + \
+             (1 << (64 - self.TIMESTAMP_SHIFT)) - 1
+         current_timestamp = self._get_current_timestamp()
+         if current_timestamp > max_support_timestamp:
+             raise RuntimeError(
+                 f"Current timestamp ({current_timestamp}) exceeds the maximum supported timestamp ({max_support_timestamp}); "
+                 f"adjust START_TIMESTAMP or reduce the TIMESTAMP_SHIFT bit width"
+             )
+
      def _get_k8s_machine_id(self) -> int:
          """
-         Auto-compute a unique machine_id from the K8s environment (no public-network dependency, layered fallbacks):
-         Priority: POD_NAME > POD_IP > container intranet IP (read from NICs) > container hostname > random number (final fallback)
+         Auto-compute a unique machine_id from the K8s environment (no public-network dependency, layered fallbacks, lower collision risk):
+         Priority: POD_NAME > POD_IP > container intranet IP (via psutil) > container hostname > process+time+random (final fallback)
          """
          # 1. Prefer the K8s-injected POD_NAME (injected by default, highest priority)
          pod_name = environ.get("POD_NAME")
@@ -68,7 +88,7 @@ class Snowflake:
          if pod_ip:
              return self._hash_to_machine_id(pod_ip)
 
-         # 3. Fallback 1: read local NICs for an intranet IP (no public-network dependency)
+         # 3. Fallback 1: read local NICs for an intranet IP (psutil replaces netifaces)
          try:
              local_ip = self._get_local_internal_ip()
              if local_ip:
@@ -81,48 +101,60 @@ class Snowflake:
          if hostname:
              return self._hash_to_machine_id(hostname)
 
-         # 5. Final fallback: generate a random number (used only in extreme cases)
-         random_id = random.randint(0, self.MAX_MACHINE_ID)
-         return random_id
+         # 5. Final fallback: add entropy (PID + ms timestamp + random) to sharply reduce collision odds
+         fallback_text = f"{os.getpid()}_{int(time.time()*1000)}_{random.randint(0, 100000)}"
+         return self._hash_to_machine_id(fallback_text)
 
      def _get_local_internal_ip(self) -> Optional[str]:
          """
-         Read local NIC information and return a non-loopback intranet IP (no public-network dependency)
+         Read local NIC information via psutil and return a non-loopback intranet IP (cross-platform; filters loopback NICs such as lo/lo0)
          :return: intranet IP string, or None on failure
          """
          try:
-             # Iterate over all NICs
-             for interface in netifaces.interfaces():
-                 # Get the NIC's address info
-                 addrs = netifaces.ifaddresses(interface)
-                 # Only take IPv4 addresses
-                 if netifaces.AF_INET in addrs:
-                     for addr in addrs[netifaces.AF_INET]:
-                         ip = addr.get('addr')
-                         # Filter out the loopback address (127.0.0.1)
+             # Iterate over all NIC interfaces
+             net_if_addrs = psutil.net_if_addrs()
+             for interface_name, addrs in net_if_addrs.items():
+                 # Skip loopback/virtual NICs (covers lo, lo0, lo1, Loopback, virtual, etc.)
+                 if (interface_name.lower().startswith("lo")
+                         or interface_name.lower() in ["loopback", "virtual"]):
+                     continue
+                 # Walk the NIC's addresses and return the first non-loopback IPv4
+                 for addr in addrs:
+                     if addr.family == socket.AF_INET:  # snicaddr families are socket constants
+                         ip = addr.address
                          if ip and not ip.startswith('127.'):
                              return ip
              return None
-         except ImportError:
-             # netifaces is not installed; fall back to the socket approach
+         except Exception:
+             # psutil call failed; fall back to pure stdlib methods
              return self._get_local_ip_fallback()
 
      def _get_local_ip_fallback(self) -> Optional[str]:
          """
-         Fallback: obtain an IP via a local socket only, without touching the public internet (for environments without netifaces)
+         Enhanced fallback: pure Python stdlib, several ways to obtain an intranet IP (no third-party dependency)
          """
+         # Approach 1: UDP-connect toward a private address (no packet is sent; avoids the public internet)
          try:
-             # Create a socket without connecting anywhere; just bind locally
              s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-             s.bind(('', 0))
+             s.connect(("192.168.0.1", 80))
              local_ip = s.getsockname()[0]
              s.close()
-             # Filter out loopback addresses
              if not local_ip.startswith('127.'):
                  return local_ip
-             return None
          except Exception:
-             return None
+             pass
+
+         # Approach 2: enumerate all local IPs via hostname resolution
+         try:
+             hostname = socket.gethostname()
+             ip_list = socket.gethostbyname_ex(hostname)[2]
+             for ip in ip_list:
+                 if not ip.startswith('127.'):
+                     return ip
+         except Exception:
+             pass
+
+         return None
 
      def _hash_to_machine_id(self, text: str) -> int:
          """Hash the string and take it modulo MAX_MACHINE_ID for an evenly distributed machine_id"""
@@ -131,42 +163,60 @@ class Snowflake:
          return hash_int % self.MAX_MACHINE_ID
 
      def _get_current_timestamp(self) -> int:
+         """Return the current timestamp in milliseconds."""
          return int(time.time() * 1000)
 
      def _wait_next_millisecond(self, current_timestamp: int) -> int:
+         """Spin until the next millisecond to avoid exhausting the sequence."""
          while current_timestamp <= self.last_timestamp:
              current_timestamp = self._get_current_timestamp()
          return current_timestamp
 
      def generate_id(self) -> int:
-         with self.lock:
-             current_timestamp = self._get_current_timestamp()
+         """Generate a snowflake ID (production hardening: finer-grained locking, tolerates slight clock rollback)"""
+         current_timestamp = self._get_current_timestamp()
 
-             if current_timestamp < self.last_timestamp:
+         # 1. Handle clock rollback: tolerate drift within CLOCK_BACKWARD_THRESHOLD, raise beyond it
+         time_diff = self.last_timestamp - current_timestamp
+         if time_diff > 0:
+             if time_diff > self.CLOCK_BACKWARD_THRESHOLD:
                  raise RuntimeError(
-                     f"Clock rollback detected: current timestamp ({current_timestamp}) < last timestamp ({self.last_timestamp})"
+                     f"Clock rollback detected: current timestamp ({current_timestamp}) < last timestamp ({self.last_timestamp}), "
+                     f"delta {time_diff}ms (threshold {self.CLOCK_BACKWARD_THRESHOLD}ms)"
                  )
+             # Slight rollback: wait for the clock to catch up
+             current_timestamp = self._wait_next_millisecond(current_timestamp)
 
-             if current_timestamp == self.last_timestamp:
+         # 2. Finer-grained locking: take the lock only while mutating timestamp/sequence state
+         if current_timestamp != self.last_timestamp:
+             with self.lock:
+                 self.last_timestamp = current_timestamp
+                 self.sequence = 0
+         else:
+             with self.lock:
                  self.sequence = (self.sequence + 1) & self.MAX_SEQUENCE
                  if self.sequence == 0:
                      current_timestamp = self._wait_next_millisecond(
                          current_timestamp)
-             else:
-                 self.sequence = 0
+                 self.last_timestamp = current_timestamp
 
-             self.last_timestamp = current_timestamp
+         # 3. Compose the final snowflake ID
+         snowflake_id = (
+             ((current_timestamp - self.START_TIMESTAMP) << self.TIMESTAMP_SHIFT)
+             | (self.machine_id << self.MACHINE_ID_SHIFT)
+             | self.sequence
+         )
 
-             snowflake_id = (
-                 ((current_timestamp - self.START_TIMESTAMP) << self.TIMESTAMP_SHIFT)
-                 | (self.machine_id << self.MACHINE_ID_SHIFT)
-                 | self.sequence
-             )
+         # Final check: must not exceed the Java long maximum
+         if snowflake_id > self._MAX_JAVA_LONG:
+             raise RuntimeError(
+                 f"Generated snowflake ID ({snowflake_id}) exceeds the Java long maximum ({self._MAX_JAVA_LONG})")
 
-             return snowflake_id
+         return snowflake_id
 
      @staticmethod
      def parse_id(snowflake_id: int) -> dict:
+         """Parse a snowflake ID into its generation time, machine ID, sequence, and related info"""
          from datetime import datetime
          sequence = snowflake_id & Snowflake.MAX_SEQUENCE
          machine_id = (snowflake_id >>
@@ -180,21 +230,21 @@ class Snowflake:
              "snowflake_id": snowflake_id,
              "generate_time": generate_time,
              "machine_id": machine_id,
-             "sequence": sequence
+             "sequence": sequence,
+             "is_java_long_safe": snowflake_id <= Snowflake._MAX_JAVA_LONG
          }
 
      @classmethod
      def next_id(cls) -> str:
          """
-         Generate a snowflake ID (singleton pattern; avoids creating duplicate instances)
+         Generate a snowflake ID (thread-safe singleton; avoids duplicate instances; all initialization happens inside the lock)
          :return: snowflake ID string
          """
-         # Create the instance via the singleton pattern
          if cls._instance is None:
              with cls._instance_lock:
                  if cls._instance is None:
+                     # Initialize inside the lock to avoid redundant machine_id computation across threads
                      cls._instance = cls()
-         # Generate an ID and return it as a string
          return str(cls._instance.generate_id())
 
      @ClassProperty
@@ -207,8 +257,8 @@ class Snowflake:
 
 
  if __name__ == "__main__":
-     print("=== Generating snowflake IDs via the Python 3.11+-compatible property ===")
-     # Accessing Snowflake.id directly generates an ID with no deprecation warning
+     print("=== Production-grade snowflake ID generation test ===")
+     # 1. Basic generation test
      id1 = Snowflake.id
      id2 = Snowflake.id
      id3 = Snowflake.id
@@ -217,19 +267,34 @@ if __name__ == "__main__":
      print(f"Generated ID3: {id3}")
      print(f"IDs unique: {len({id1, id2, id3}) == 3}")
 
-     # The original method still works as before
-     print("\n=== Generating via the original method ===")
-     id4 = Snowflake.next_id()
-     print(f"Generated ID4: {id4}")
+     # 2. Parse the ID
+     print("\n=== Snowflake ID parsing ===")
+     parse_info = Snowflake.parse_id(int(id3))
+     for key, value in parse_info.items():
+         print(f"{key}: {value}")
 
-     # Batch validation (1000 unique IDs)
+     # 3. Batch uniqueness check (10000 IDs)
+     print("\n=== Batch uniqueness check (10000 IDs) ===")
      id_set = set()
-     _MAX_JAVA_LONG = 9223372036854775807
-     for i in range(1000):
-         snow_id = Snowflake.id  # using the compatible property access throughout
-         id_num = int(snow_id)
-         assert id_num <= _MAX_JAVA_LONG, f"ID exceeds the Java long maximum: {id_num}"
-         assert snow_id not in id_set, f"duplicate ID generated: {snow_id}"
+     duplicate_count = 0
+     for i in range(10000):
+         snow_id = Snowflake.id
+         if snow_id in id_set:
+             duplicate_count += 1
          id_set.add(snow_id)
-
-     print(f"\nGenerated {len(id_set)} unique snowflake IDs; the compatible property access passed validation!")
+     print(f"Total generated: 10000")
+     print(f"Unique IDs: {len(id_set)}")
+     print(f"Duplicate IDs: {duplicate_count}")
+     print(f"Machine ID: {Snowflake._instance.machine_id}")
+
+     # 4. High-concurrency test
+     import concurrent.futures
+     print("\n=== High-concurrency test (100 threads) ===")
+     id_set_concurrent = set()
+     with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
+         futures = [executor.submit(lambda: Snowflake.id) for _ in range(10000)]
+         for future in concurrent.futures.as_completed(futures):
+             id_set_concurrent.add(future.result())
+     print(f"Unique IDs generated under high concurrency: {len(id_set_concurrent)}")
+
+     print("\n=== Production-grade snowflake algorithm validation passed ===")
@@ -1,22 +1,23 @@
  Metadata-Version: 2.4
  Name: sycommon-python-lib
- Version: 0.1.55b0
+ Version: 0.1.55b1
  Summary: Add your description here
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  Requires-Dist: aio-pika>=9.5.8
  Requires-Dist: aiohttp>=3.13.2
+ Requires-Dist: aiomysql>=0.3.2
  Requires-Dist: decorator>=5.2.1
  Requires-Dist: fastapi>=0.127.0
  Requires-Dist: kafka-python>=2.3.0
  Requires-Dist: loguru>=0.7.3
  Requires-Dist: mysql-connector-python>=9.5.0
  Requires-Dist: nacos-sdk-python<3.0,>=2.0.9
- Requires-Dist: netifaces>=0.11.0
+ Requires-Dist: psutil>=7.1.3
  Requires-Dist: pydantic>=2.12.5
  Requires-Dist: python-dotenv>=1.2.1
  Requires-Dist: pyyaml>=6.0.3
- Requires-Dist: sqlalchemy>=2.0.45
+ Requires-Dist: sqlalchemy[asyncio]>=2.0.45
  Requires-Dist: starlette>=0.50.0
  Requires-Dist: uvicorn>=0.40.0
 
@@ -1,6 +1,6 @@
  command/cli.py,sha256=bP2LCLkRvfETIwWkVD70q5xFxMI4D3BpH09Ws1f-ENc,5849
  sycommon/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- sycommon/services.py,sha256=66YUddNJsbx8axuxUZ6ERr95yphlhLOUbBUJ_tgNseI,11851
+ sycommon/services.py,sha256=qqNmSpg8JwTcTZPtY5LyW0ttrrEbeCvh4yy09u9OMNg,11488
  sycommon/config/Config.py,sha256=9yO5b8WfvEDvkyrGrlwrLFasgh_-MjcEvGF20Gz5Xo4,3041
  sycommon/config/DatabaseConfig.py,sha256=ILiUuYT9_xJZE2W-RYuC3JCt_YLKc1sbH13-MHIOPhg,804
  sycommon/config/EmbeddingConfig.py,sha256=gPKwiDYbeu1GpdIZXMmgqM7JqBIzCXi0yYuGRLZooMI,362
@@ -8,6 +8,8 @@ sycommon/config/LLMConfig.py,sha256=yU-aIqePIeF6msfRVEtGq7SXZVDfHyTi6JduKjhMO_4,
  sycommon/config/MQConfig.py,sha256=_RDcmIdyWKjmgM5ZnriOoI-DpaxgXs7CD0awdAD6z88,252
  sycommon/config/RerankerConfig.py,sha256=dohekaY_eTynmMimIuKHBYGXXQO6rJjSkm94OPLuMik,322
  sycommon/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ sycommon/database/async_base_db_service.py,sha256=w6ONUiTtF4-bXRnkBt9QpL9BAy0XUDbQG7F9Hf2rfjw,1337
+ sycommon/database/async_database_service.py,sha256=4Ag5PH6DFEcJOXR8MRF9V_Jho5uCoU9Ibo3PqulDsXw,3916
  sycommon/database/base_db_service.py,sha256=J5ELHMNeGfzA6zVcASPSPZ0XNKrRY3_gdGmVkZw3Mto,946
  sycommon/database/database_service.py,sha256=mun5vgM7nkuH6_UyHLHqQ2Qk_5gRgMxJu4_obIKLT6o,3253
  sycommon/health/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -15,6 +17,7 @@ sycommon/health/health_check.py,sha256=EhfbhspRpQiKJaxdtE-PzpKQO_ucaFKtQxIm16F5M
  sycommon/health/metrics.py,sha256=fHqO73JuhoZkNPR-xIlxieXiTCvttq-kG-tvxag1s1s,268
  sycommon/health/ping.py,sha256=FTlnIKk5y1mPfS1ZGOeT5IM_2udF5aqVLubEtuBp18M,250
  sycommon/logging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ sycommon/logging/async_sql_logger.py,sha256=_OY36XkUm__U3NhMgiecy-qd-nptZ_0gpE3J8lGAr58,2619
  sycommon/logging/kafka_log.py,sha256=viqJ2hDqnyX5eUKkhIhU__kytIwe6nLuHIAFGcaRpUI,21118
  sycommon/logging/logger_wrapper.py,sha256=TiHsrIIHiQMzXgXK12-0KIpU9GhwQJOoHslakzmq2zc,357
  sycommon/logging/sql_logger.py,sha256=aEU3OGnI_51Tjyuuf4FpUi9KPTceFRuKAOyQbPzGhzM,2021
@@ -50,10 +53,10 @@ sycommon/synacos/nacos_service.py,sha256=tyh_JOjjoCGiKCr1xfU7MAmu7dDQCZmTzmYsSqN
  sycommon/synacos/param.py,sha256=KcfSkxnXOa0TGmCjY8hdzU9pzUsA8-4PeyBKWI2-568,1765
  sycommon/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sycommon/tools/docs.py,sha256=OPj2ETheuWjXLyaXtaZPbwmJKfJaYXV5s4XMVAUNrms,1607
- sycommon/tools/snowflake.py,sha256=wSATJzWGb6HcaZ1u_fdzF6I5seRviQQm2KS3Jf45nm4,8520
+ sycommon/tools/snowflake.py,sha256=lVEe5mNCOgz5OqGQpf5_nXaGnRJlI2STX2s-ppTtanA,11947
  sycommon/tools/timing.py,sha256=OiiE7P07lRoMzX9kzb8sZU9cDb0zNnqIlY5pWqHcnkY,2064
- sycommon_python_lib-0.1.55b0.dist-info/METADATA,sha256=9K3VD62twbBtlxjoty9K0h96O74lp2NFfqtviimdqGM,7050
- sycommon_python_lib-0.1.55b0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- sycommon_python_lib-0.1.55b0.dist-info/entry_points.txt,sha256=q_h2nbvhhmdnsOUZEIwpuoDjaNfBF9XqppDEmQn9d_A,46
- sycommon_python_lib-0.1.55b0.dist-info/top_level.txt,sha256=98CJ-cyM2WIKxLz-Pf0AitWLhJyrfXvyY8slwjTXNuc,17
- sycommon_python_lib-0.1.55b0.dist-info/RECORD,,
+ sycommon_python_lib-0.1.55b1.dist-info/METADATA,sha256=5TLw8YgSNN_ByzdLmVQ5RBoP3xnSAdhnVlRtY90Gp-4,7086
+ sycommon_python_lib-0.1.55b1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ sycommon_python_lib-0.1.55b1.dist-info/entry_points.txt,sha256=q_h2nbvhhmdnsOUZEIwpuoDjaNfBF9XqppDEmQn9d_A,46
+ sycommon_python_lib-0.1.55b1.dist-info/top_level.txt,sha256=98CJ-cyM2WIKxLz-Pf0AitWLhJyrfXvyY8slwjTXNuc,17
+ sycommon_python_lib-0.1.55b1.dist-info/RECORD,,